From df41e1e0b9fb5fa38a49eff61b2263757953aaab Mon Sep 17 00:00:00 2001
From: "wenfeng.zhang"
Date: Wed, 17 Apr 2024 15:15:59 +0800
Subject: [PATCH] add Part-A2-Free and Part-A2-Anchor models based on OpenPCDet

---
 cv/3d_detection/Part-A2-Anchor/README.md | 61 +
 cv/3d_detection/Part-A2-Free/README.md | 61 +
 toolbox/openpcdet/.gitignore | 31 +
 toolbox/openpcdet/LICENSE | 201 +
 toolbox/openpcdet/README-ILUVATAR.md | 6 +
 toolbox/openpcdet/README.md | 291 +
 toolbox/openpcdet/build_openpcdet.sh | 16 +
 toolbox/openpcdet/clean_openpcdet.sh | 10 +
 toolbox/openpcdet/docker/Dockerfile | 55 +
 toolbox/openpcdet/docker/README.md | 25 +
 toolbox/openpcdet/docker/cu116.Dockerfile | 83 +
 .../openpcdet/docs/CUSTOM_DATASET_TUTORIAL.md | 108 +
 toolbox/openpcdet/docs/DEMO.md | 51 +
 toolbox/openpcdet/docs/GETTING_STARTED.md | 273 +
 toolbox/openpcdet/docs/INSTALL.md | 38 +
 toolbox/openpcdet/docs/changelog.md | 40 +
 toolbox/openpcdet/docs/dataset_vs_model.png | Bin 0 -> 126450 bytes
 toolbox/openpcdet/docs/demo.png | Bin 0 -> 620088 bytes
 .../guidelines_of_approaches/bevfusion.md | 35 +
 .../docs/guidelines_of_approaches/mppnet.md | 73 +
 toolbox/openpcdet/docs/model_framework.png | Bin 0 -> 104083 bytes
 .../openpcdet/docs/multiple_models_demo.png | Bin 0 -> 235379 bytes
 toolbox/openpcdet/docs/open_mmlab.png | Bin 0 -> 262300 bytes
 toolbox/openpcdet/install_openpcdet.sh | 33 +
 toolbox/openpcdet/pcdet/__init__.py | 24 +
 toolbox/openpcdet/pcdet/config.py | 85 +
 toolbox/openpcdet/pcdet/datasets/__init__.py | 82 +
 .../pcdet/datasets/argo2/__init__.py | 2 +
 .../pcdet/datasets/argo2/argo2_dataset.py | 537 +
 .../datasets/argo2/argo2_utils/constants.py | 12 +
 .../pcdet/datasets/argo2/argo2_utils/so3.py | 141 +
 .../pcdet/datasets/augmentor/__init__.py | 0
 .../datasets/augmentor/augmentor_utils.py | 658 +
 .../datasets/augmentor/data_augmentor.py | 319 +
 .../datasets/augmentor/database_sampler.py | 502 +
 .../pcdet/datasets/custom/__init__.py | 0
 .../pcdet/datasets/custom/custom_dataset.py | 283 +
 toolbox/openpcdet/pcdet/datasets/dataset.py | 325 +
 .../pcdet/datasets/kitti/__init__.py | 0
 .../pcdet/datasets/kitti/kitti_dataset.py | 484 +
 .../kitti/kitti_object_eval_python/LICENSE | 21 +
 .../kitti/kitti_object_eval_python/README.md | 32 +
 .../kitti_object_eval_python/__init__.py | 0
 .../kitti/kitti_object_eval_python/eval.py | 808 +
 .../kitti_object_eval_python/evaluate.py | 33 +
 .../kitti_object_eval_python/kitti_common.py | 412 +
 .../kitti_object_eval_python/rotate_iou.py | 330 +
 .../pcdet/datasets/kitti/kitti_utils.py | 66 +
 .../openpcdet/pcdet/datasets/lyft/__init__.py | 0
 .../pcdet/datasets/lyft/lyft_dataset.py | 303 +
 .../datasets/lyft/lyft_mAP_eval/__init__.py | 0
 .../datasets/lyft/lyft_mAP_eval/lyft_eval.py | 435 +
 .../pcdet/datasets/lyft/lyft_utils.py | 332 +
 .../pcdet/datasets/nuscenes/__init__.py | 0
 .../datasets/nuscenes/nuscenes_dataset.py | 434 +
 .../pcdet/datasets/nuscenes/nuscenes_utils.py | 588 +
 .../openpcdet/pcdet/datasets/once/__init__.py | 0
 .../pcdet/datasets/once/once_dataset.py | 444 +
 .../datasets/once/once_eval/eval_utils.py | 53 +
 .../datasets/once/once_eval/evaluation.py | 420 +
 .../datasets/once/once_eval/iou_utils.py | 344 +
 .../pcdet/datasets/once/once_toolkits.py | 125 +
 .../pcdet/datasets/pandaset/__init__.py | 0
 .../datasets/pandaset/pandaset_dataset.py | 489 +
 .../pcdet/datasets/processor/__init__.py | 0
 .../datasets/processor/data_processor.py | 298 +
 .../processor/point_feature_encoder.py | 57 +
 .../pcdet/datasets/waymo/__init__.py | 0
 .../pcdet/datasets/waymo/waymo_dataset.py | 827 +
 .../pcdet/datasets/waymo/waymo_eval.py | 251 +
 .../pcdet/datasets/waymo/waymo_utils.py | 268 +
 toolbox/openpcdet/pcdet/models/__init__.py | 54 +
 .../pcdet/models/backbones_2d/__init__.py | 7 +
 .../models/backbones_2d/base_bev_backbone.py | 351 +
 .../models/backbones_2d/fuser/__init__.py | 4 +
 .../models/backbones_2d/fuser/convfuser.py | 33 +
 .../backbones_2d/map_to_bev/__init__.py | 10 +
 .../map_to_bev/conv2d_collapse.py | 38 +
 .../map_to_bev/height_compression.py | 26 +
 .../map_to_bev/pointpillar_scatter.py | 73 +
 .../pcdet/models/backbones_3d/__init__.py | 22 +
 .../pcdet/models/backbones_3d/dsvt.py | 616 +
 .../SemanticSeg/basic_blocks.py | 65 +
 .../SemanticSeg/pyramid_ffn.py | 77 +
 .../SemanticSeg/sem_deeplabv3.py | 160 +
 .../focal_sparse_conv/focal_sparse_conv.py | 224 +
 .../focal_sparse_conv/focal_sparse_utils.py | 147 +
 .../pcdet/models/backbones_3d/pfe/__init__.py | 5 +
 .../backbones_3d/pfe/voxel_set_abstraction.py | 411 +
 .../models/backbones_3d/pointnet2_backbone.py | 206 +
 .../models/backbones_3d/spconv_backbone.py | 295 +
 .../models/backbones_3d/spconv_backbone_2d.py | 300 +
 .../backbones_3d/spconv_backbone_focal.py | 269 +
 .../backbones_3d/spconv_backbone_voxelnext.py | 225 +
 .../spconv_backbone_voxelnext2d.py | 219 +
 .../pcdet/models/backbones_3d/spconv_unet.py | 212 +
 .../pcdet/models/backbones_3d/vfe/__init__.py | 18 +
 .../backbones_3d/vfe/dynamic_mean_vfe.py | 76 +
 .../backbones_3d/vfe/dynamic_pillar_vfe.py | 240 +
 .../backbones_3d/vfe/dynamic_voxel_vfe.py | 106 +
 .../models/backbones_3d/vfe/image_vfe.py | 85 +
 .../vfe/image_vfe_modules/__init__.py | 0
 .../vfe/image_vfe_modules/f2v/__init__.py | 5 +
 .../f2v/frustum_grid_generator.py | 145 +
 .../image_vfe_modules/f2v/frustum_to_voxel.py | 54 +
 .../vfe/image_vfe_modules/f2v/sampler.py | 37 +
 .../vfe/image_vfe_modules/ffn/__init__.py | 5 +
 .../vfe/image_vfe_modules/ffn/ddn/__init__.py | 5 +
 .../ffn/ddn/ddn_deeplabv3.py | 24 +
 .../image_vfe_modules/ffn/ddn/ddn_template.py | 162 +
 .../ffn/ddn_loss/__init__.py | 5 +
 .../ffn/ddn_loss/balancer.py | 50 +
 .../ffn/ddn_loss/ddn_loss.py | 75 +
 .../vfe/image_vfe_modules/ffn/depth_ffn.py | 103 +
 .../pcdet/models/backbones_3d/vfe/mean_vfe.py | 31 +
 .../models/backbones_3d/vfe/pillar_vfe.py | 123 +
 .../models/backbones_3d/vfe/vfe_template.py | 22 +
 .../pcdet/models/backbones_image/__init__.py | 4 +
 .../backbones_image/img_neck/__init__.py | 4 +
 .../img_neck/generalized_lss.py | 76 +
 .../pcdet/models/backbones_image/swin.py | 736 +
 .../pcdet/models/dense_heads/__init__.py | 21 +
 .../models/dense_heads/anchor_head_multi.py | 373 +
 .../models/dense_heads/anchor_head_single.py | 75 +
 .../dense_heads/anchor_head_template.py | 275 +
 .../pcdet/models/dense_heads/center_head.py | 416 +
 .../models/dense_heads/point_head_box.py | 115 +
 .../models/dense_heads/point_head_simple.py | 91 +
 .../models/dense_heads/point_head_template.py | 210 +
 .../dense_heads/point_intra_part_head.py | 127 +
 .../dense_heads/target_assigner/__init__.py | 0
 .../target_assigner/anchor_generator.py | 79 +
 .../target_assigner/atss_target_assigner.py | 141 +
 .../axis_aligned_target_assigner.py | 210 +
 .../target_assigner/hungarian_assigner.py | 131 +
 .../models/dense_heads/transfusion_head.py | 479 +
 .../models/dense_heads/voxelnext_head.py | 559 +
 .../pcdet/models/detectors/PartA2_net.py | 31 +
 .../pcdet/models/detectors/__init__.py | 46 +
 .../pcdet/models/detectors/bevfusion.py | 101 +
 .../openpcdet/pcdet/models/detectors/caddn.py | 38 +
 .../pcdet/models/detectors/centerpoint.py | 50 +
 .../models/detectors/detector3d_template.py | 415 +
 .../pcdet/models/detectors/mppnet.py | 181 +
 .../pcdet/models/detectors/mppnet_e2e.py | 222 +
 .../pcdet/models/detectors/pillarnet.py | 50 +
 .../pcdet/models/detectors/point_rcnn.py | 30 +
 .../pcdet/models/detectors/pointpillar.py | 34 +
 .../pcdet/models/detectors/pv_rcnn.py | 36 +
 .../models/detectors/pv_rcnn_plusplus.py | 53 +
 .../pcdet/models/detectors/second_net.py | 34 +
 .../pcdet/models/detectors/second_net_iou.py | 177 +
 .../pcdet/models/detectors/transfusion.py | 50 +
 .../pcdet/models/detectors/voxel_rcnn.py | 37 +
 .../pcdet/models/detectors/voxelnext.py | 44 +
 .../pcdet/models/model_utils/__init__.py | 0
 .../models/model_utils/basic_block_2d.py | 34 +
 .../models/model_utils/centernet_utils.py | 385 +
 .../pcdet/models/model_utils/dsvt_utils.py | 150 +
 .../models/model_utils/model_nms_utils.py | 107 +
 .../pcdet/models/model_utils/mppnet_utils.py | 420 +
 .../pcdet/models/model_utils/swin_utils.py | 659 +
 .../models/model_utils/transfusion_utils.py | 102 +
 .../pcdet/models/roi_heads/__init__.py | 19 +
 .../pcdet/models/roi_heads/mppnet_head.py | 992 ++
 .../roi_heads/mppnet_memory_bank_e2e.py | 581 +
 .../pcdet/models/roi_heads/partA2_head.py | 224 +
 .../pcdet/models/roi_heads/pointrcnn_head.py | 179 +
 .../pcdet/models/roi_heads/pvrcnn_head.py | 175 +
 .../models/roi_heads/roi_head_template.py | 261 +
 .../pcdet/models/roi_heads/second_head.py | 188 +
 .../roi_heads/target_assigner/__init__.py | 0
 .../target_assigner/proposal_target_layer.py | 228 +
 .../pcdet/models/roi_heads/voxelrcnn_head.py | 262 +
 .../pcdet/models/view_transforms/__init__.py | 4 +
 .../pcdet/models/view_transforms/depth_lss.py | 258 +
 toolbox/openpcdet/pcdet/ops/__init__.py | 0
 .../openpcdet/pcdet/ops/bev_pool/__init__.py | 1 +
 .../openpcdet/pcdet/ops/bev_pool/bev_pool.py | 97 +
 .../pcdet/ops/bev_pool/src/bev_pool.cpp | 94 +
 .../pcdet/ops/bev_pool/src/bev_pool_cuda.cu | 98 +
 .../pcdet/ops/ingroup_inds/ingroup_inds_op.py | 31 +
 .../pcdet/ops/ingroup_inds/src/error.cuh | 18 +
 .../ops/ingroup_inds/src/ingroup_inds.cpp | 54 +
 .../ingroup_inds/src/ingroup_inds_kernel.cu | 77 +
 .../openpcdet/pcdet/ops/iou3d_nms/__init__.py | 0
 .../pcdet/ops/iou3d_nms/iou3d_nms_utils.py | 189 +
 .../pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp | 273 +
 .../pcdet/ops/iou3d_nms/src/iou3d_cpu.h | 11 +
 .../pcdet/ops/iou3d_nms/src/iou3d_nms.cpp | 235 +
 .../pcdet/ops/iou3d_nms/src/iou3d_nms.h | 17 +
 .../pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp | 20 +
 .../ops/iou3d_nms/src/iou3d_nms_kernel.cu | 464 +
 .../openpcdet/pcdet/ops/pointnet2/__init__.py | 0
 .../ops/pointnet2/pointnet2_batch/__init__.py | 0
 .../pointnet2_batch/pointnet2_modules.py | 174 +
 .../pointnet2_batch/pointnet2_utils.py | 290 +
 .../pointnet2_batch/src/ball_query.cpp | 39 +
 .../pointnet2_batch/src/ball_query_gpu.cu | 73 +
 .../pointnet2_batch/src/ball_query_gpu.h | 15 +
 .../pointnet2_batch/src/cuda_utils.h | 16 +
 .../pointnet2_batch/src/group_points.cpp | 36 +
 .../pointnet2_batch/src/group_points_gpu.cu | 92 +
 .../pointnet2_batch/src/group_points_gpu.h | 22 +
 .../pointnet2_batch/src/interpolate.cpp | 56 +
 .../pointnet2_batch/src/interpolate_gpu.cu | 169 +
 .../pointnet2_batch/src/interpolate_gpu.h | 30 +
 .../pointnet2_batch/src/pointnet2_api.cpp | 24 +
 .../pointnet2_batch/src/sampling.cpp | 46 +
 .../pointnet2_batch/src/sampling_gpu.cu | 260 +
 .../pointnet2_batch/src/sampling_gpu.h | 29 +
 .../ops/pointnet2/pointnet2_stack/__init__.py | 0
 .../pointnet2_stack/pointnet2_modules.py | 470 +
 .../pointnet2_stack/pointnet2_utils.py | 457 +
 .../pointnet2_stack/src/ball_query.cpp | 45 +
 .../pointnet2_stack/src/ball_query_gpu.cu | 90 +
 .../pointnet2_stack/src/ball_query_gpu.h | 25 +
 .../pointnet2_stack/src/cuda_utils.h | 9 +
 .../pointnet2_stack/src/group_points.cpp | 68 +
 .../pointnet2_stack/src/group_points_gpu.cu | 125 +
 .../pointnet2_stack/src/group_points_gpu.h | 31 +
 .../pointnet2_stack/src/interpolate.cpp | 107 +
 .../pointnet2_stack/src/interpolate_gpu.cu | 196 +
 .../pointnet2_stack/src/interpolate_gpu.h | 39 +
 .../pointnet2_stack/src/pointnet2_api.cpp | 31 +
 .../pointnet2_stack/src/sampling.cpp | 57 +
 .../pointnet2_stack/src/sampling_gpu.cu | 350 +
 .../pointnet2_stack/src/sampling_gpu.h | 23 +
 .../pointnet2_stack/src/vector_pool.cpp | 200 +
 .../pointnet2_stack/src/vector_pool_gpu.cu | 487 +
 .../pointnet2_stack/src/vector_pool_gpu.h | 71 +
 .../pointnet2_stack/src/voxel_query.cpp | 41 +
 .../pointnet2_stack/src/voxel_query_gpu.cu | 113 +
 .../pointnet2_stack/src/voxel_query_gpu.h | 19 +
 .../pointnet2_stack/voxel_pool_modules.py | 131 +
 .../pointnet2_stack/voxel_query_utils.py | 100 +
 .../pcdet/ops/roiaware_pool3d/__init__.py | 0
 .../roiaware_pool3d/roiaware_pool3d_utils.py | 111 +
 .../roiaware_pool3d/src/roiaware_pool3d.cpp | 177 +
 .../src/roiaware_pool3d_kernel.cu | 359 +
 .../pcdet/ops/roipoint_pool3d/__init__.py | 0
 .../roipoint_pool3d/roipoint_pool3d_utils.py | 67 +
 .../roipoint_pool3d/src/roipoint_pool3d.cpp | 60 +
 .../src/roipoint_pool3d_kernel.cu | 165 +
 toolbox/openpcdet/pcdet/utils/__init__.py | 0
 .../openpcdet/pcdet/utils/box_coder_utils.py | 222 +
 toolbox/openpcdet/pcdet/utils/box_utils.py | 440 +
 .../pcdet/utils/calibration_kitti.py | 125 +
 toolbox/openpcdet/pcdet/utils/common_utils.py | 295 +
 toolbox/openpcdet/pcdet/utils/commu_utils.py | 182 +
 toolbox/openpcdet/pcdet/utils/loss_utils.py | 649 +
 .../openpcdet/pcdet/utils/object3d_custom.py | 83 +
 .../openpcdet/pcdet/utils/object3d_kitti.py | 83 +
 toolbox/openpcdet/pcdet/utils/spconv_utils.py | 38 +
 .../openpcdet/pcdet/utils/transform_utils.py | 91 +
 toolbox/openpcdet/requirements.txt | 4 +
 toolbox/openpcdet/setup.py | 137 +
 toolbox/openpcdet/tools/_init_path.py | 2 +
 toolbox/openpcdet/tools/demo.py | 112 +
 .../openpcdet/tools/eval_utils/eval_utils.py | 140 +
 .../create_integrated_database.py | 86 +
 .../tools/scripts/slurm_test_mgpu.sh | 30 +
 .../tools/scripts/slurm_test_single.sh | 19 +
 .../openpcdet/tools/scripts/slurm_train.sh | 32 +
 .../openpcdet/tools/scripts/slurm_train_v2.sh | 30 +
 .../openpcdet/tools/scripts/torch_train.sh | 18 +
 toolbox/openpcdet/tools/test.py | 207 +
 toolbox/openpcdet/tools/train.py | 230 +
 .../train_utils/optimization/__init__.py | 68 +
 .../train_utils/optimization/fastai_optim.py | 264 +
 .../optimization/learning_schedules_fastai.py | 162 +
 .../tools/train_utils/train_utils.py | 272 +
 .../tools/visual_utils/open3d_vis_utils.py | 116 +
 .../tools/visual_utils/visualize_utils.py | 215 +
 toolbox/spconv/.gitignore | 109 +
 toolbox/spconv/.gitmodules | 3 +
 toolbox/spconv/CMakeLists.txt | 55 +
 toolbox/spconv/LICENSE | 201 +
 toolbox/spconv/README-ILUVATAR.md | 16 +
 toolbox/spconv/README.md | 142 +
 toolbox/spconv/build_spconv.sh | 33 +
 toolbox/spconv/clean_spconv.sh | 9 +
 toolbox/spconv/include/paramsgrid.h | 62 +
 toolbox/spconv/include/prettyprint.h | 445 +
 toolbox/spconv/include/pybind11_utils.h | 61 +
 toolbox/spconv/include/spconv/box_iou.h | 103 +
 toolbox/spconv/include/spconv/geometry.h | 297 +
 toolbox/spconv/include/spconv/indice.cu.h | 244 +
 toolbox/spconv/include/spconv/indice.h | 79 +
 toolbox/spconv/include/spconv/maxpool.h | 44 +
 toolbox/spconv/include/spconv/mp_helper.h | 47 +
 toolbox/spconv/include/spconv/nms.h | 201 +
 toolbox/spconv/include/spconv/nms_gpu.h | 18 +
 toolbox/spconv/include/spconv/point2voxel.h | 94 +
 toolbox/spconv/include/spconv/pool_ops.h | 97 +
 toolbox/spconv/include/spconv/reordering.cu.h | 161 +
 toolbox/spconv/include/spconv/reordering.h | 40 +
 toolbox/spconv/include/spconv/spconv_ops.h | 561 +
 .../include/tensorview/helper_kernel.cu.h | 81 +
 .../spconv/include/tensorview/helper_launch.h | 21 +
 .../spconv/include/tensorview/tensorview.h | 1146 ++
 toolbox/spconv/include/torch_utils.h | 66 +
 toolbox/spconv/include/utility/timer.h | 54 +
 toolbox/spconv/install_spconv.sh | 35 +
 toolbox/spconv/setup.py | 89 +
 toolbox/spconv/spconv/__init__.py | 97 +
 toolbox/spconv/spconv/conv.py | 355 +
 toolbox/spconv/spconv/functional.py | 118 +
 toolbox/spconv/spconv/modules.py | 130 +
 toolbox/spconv/spconv/ops.py | 157 +
 toolbox/spconv/spconv/pool.py | 105 +
 toolbox/spconv/spconv/test_utils.py | 190 +
 toolbox/spconv/spconv/utils/__init__.py | 112 +
 toolbox/spconv/src/spconv/CMakeLists.txt | 9 +
 toolbox/spconv/src/spconv/all.cc | 34 +
 toolbox/spconv/src/spconv/indice.cc | 90 +
 toolbox/spconv/src/spconv/indice.cu | 158 +
 toolbox/spconv/src/spconv/maxpool.cc | 82 +
 toolbox/spconv/src/spconv/maxpool.cu | 472 +
 toolbox/spconv/src/spconv/reordering.cc | 70 +
 toolbox/spconv/src/spconv/reordering.cu | 155 +
 toolbox/spconv/src/utils/CMakeLists.txt | 22 +
 toolbox/spconv/src/utils/all.cc | 54 +
 toolbox/spconv/src/utils/nms.cu | 165 +
 toolbox/spconv/test/CMakeLists.txt | 27 +
 toolbox/spconv/test/src/catch_main.cpp | 15 +
 toolbox/spconv/test/src/test_conv_rule.cpp | 127 +
 toolbox/spconv/test/test_SparseConv2d.py | 125 +
 toolbox/spconv/test/test_SparseConv3d.py | 160 +
 toolbox/spconv/test/test_SparseConvTensor.py | 20 +
 .../spconv/test/test_SparseInverseConv2d.py | 90 +
 .../spconv/test/test_SparseInverseConv3d.py | 91 +
 toolbox/spconv/test/test_SubMConv2d.py | 126 +
 toolbox/spconv/test/test_SubMConv3d.py | 123 +
 toolbox/spconv/test/test_conv.py | 618 +
 toolbox/spconv/third_party/catch2/catch.hpp | 14020 ++++++++++++++++
 .../spconv/third_party/pybind11/.appveyor.yml | 35 +
 .../spconv/third_party/pybind11/.clang-format | 38 +
 .../spconv/third_party/pybind11/.clang-tidy | 75 +
 .../third_party/pybind11/.cmake-format.yaml | 73 +
 .../third_party/pybind11/.gitattributes | 1 +
 .../third_party/pybind11/.github/CODEOWNERS | 9 +
 .../pybind11/.github/CONTRIBUTING.md | 388 +
 .../.github/ISSUE_TEMPLATE/bug-report.yml | 45 +
 .../.github/ISSUE_TEMPLATE/config.yml | 8 +
 .../pybind11/.github/dependabot.yml | 7 +
 .../third_party/pybind11/.github/labeler.yml | 8 +
 .../pybind11/.github/labeler_merged.yml | 3 +
 .../pybind11/.github/matchers/pylint.json | 32 +
 .../pybind11/.github/pull_request_template.md | 19 +
 .../pybind11/.github/workflows/ci.yml | 956 ++
 .../pybind11/.github/workflows/configure.yml | 80 +
 .../pybind11/.github/workflows/format.yml | 55 +
 .../pybind11/.github/workflows/labeler.yml | 16 +
 .../pybind11/.github/workflows/pip.yml | 110 +
 .../pybind11/.github/workflows/upstream.yml | 112 +
 .../spconv/third_party/pybind11/.gitignore | 45 +
 .../pybind11/.pre-commit-config.yaml | 170 +
 .../third_party/pybind11/.readthedocs.yml | 3 +
 .../third_party/pybind11/CMakeLists.txt | 299 +
 toolbox/spconv/third_party/pybind11/LICENSE | 29 +
 .../spconv/third_party/pybind11/MANIFEST.in | 5 +
 .../spconv/third_party/pybind11/README.rst | 180 +
 .../spconv/third_party/pybind11/docs/Doxyfile | 21 +
 .../pybind11/docs/_static/css/custom.css | 3 +
 .../pybind11/docs/advanced/cast/chrono.rst | 81 +
 .../pybind11/docs/advanced/cast/custom.rst | 93 +
 .../pybind11/docs/advanced/cast/eigen.rst | 310 +
 .../docs/advanced/cast/functional.rst | 109 +
 .../pybind11/docs/advanced/cast/index.rst | 43 +
 .../pybind11/docs/advanced/cast/overview.rst | 170 +
 .../pybind11/docs/advanced/cast/stl.rst | 249 +
 .../pybind11/docs/advanced/cast/strings.rst | 292 +
 .../pybind11/docs/advanced/classes.rst | 1335 ++
 .../pybind11/docs/advanced/embedding.rst | 262 +
 .../pybind11/docs/advanced/exceptions.rst | 398 +
 .../pybind11/docs/advanced/functions.rst | 614 +
 .../pybind11/docs/advanced/misc.rst | 337 +
 .../pybind11/docs/advanced/pycpp/index.rst | 13 +
 .../pybind11/docs/advanced/pycpp/numpy.rst | 455 +
 .../pybind11/docs/advanced/pycpp/object.rst | 286 +
 .../docs/advanced/pycpp/utilities.rst | 155 +
 .../pybind11/docs/advanced/smart_ptrs.rst | 174 +
 .../third_party/pybind11/docs/basics.rst | 307 +
 .../third_party/pybind11/docs/benchmark.py | 87 +
 .../third_party/pybind11/docs/benchmark.rst | 95 +
 .../third_party/pybind11/docs/changelog.rst | 2468 +++
 .../third_party/pybind11/docs/classes.rst | 541 +
 .../third_party/pybind11/docs/cmake/index.rst | 8 +
 .../third_party/pybind11/docs/compiling.rst | 638 +
 .../spconv/third_party/pybind11/docs/conf.py | 369 +
 .../spconv/third_party/pybind11/docs/faq.rst | 307 +
 .../third_party/pybind11/docs/index.rst | 48 +
 .../third_party/pybind11/docs/installing.rst | 105 +
 .../third_party/pybind11/docs/limitations.rst | 72 +
 .../pybind11/docs/pybind11-logo.png | Bin 0 -> 61034 bytes
 .../docs/pybind11_vs_boost_python1.png | Bin 0 -> 44653 bytes
 .../docs/pybind11_vs_boost_python1.svg | 427 +
 .../docs/pybind11_vs_boost_python2.png | Bin 0 -> 41121 bytes
 .../docs/pybind11_vs_boost_python2.svg | 427 +
 .../third_party/pybind11/docs/reference.rst | 130 +
 .../third_party/pybind11/docs/release.rst | 97 +
 .../pybind11/docs/requirements.txt | 6 +
 .../third_party/pybind11/docs/upgrade.rst | 552 +
 .../pybind11/include/pybind11/attr.h | 678 +
 .../pybind11/include/pybind11/buffer_info.h | 193 +
 .../pybind11/include/pybind11/cast.h | 1665 ++
 .../pybind11/include/pybind11/chrono.h | 225 +
 .../pybind11/include/pybind11/common.h | 2 +
 .../pybind11/include/pybind11/complex.h | 74 +
 .../pybind11/include/pybind11/detail/class.h | 742 +
 .../pybind11/include/pybind11/detail/common.h | 1169 ++
 .../pybind11/include/pybind11/detail/descr.h | 158 +
 .../pybind11/include/pybind11/detail/init.h | 428 +
 .../include/pybind11/detail/internals.h | 562 +
 .../pybind11/detail/type_caster_base.h | 1010 ++
 .../pybind11/include/pybind11/detail/typeid.h | 65 +
 .../pybind11/include/pybind11/eigen.h | 708 +
 .../pybind11/include/pybind11/embed.h | 277 +
 .../pybind11/include/pybind11/eval.h | 156 +
 .../pybind11/include/pybind11/functional.h | 130 +
 .../pybind11/include/pybind11/gil.h | 202 +
 .../pybind11/include/pybind11/iostream.h | 265 +
 .../pybind11/include/pybind11/numpy.h | 1984 +++
 .../pybind11/include/pybind11/operators.h | 201 +
 .../pybind11/include/pybind11/options.h | 76 +
 .../pybind11/include/pybind11/pybind11.h | 2864 ++++
 .../pybind11/include/pybind11/pytypes.h | 2392 +++
 .../pybind11/include/pybind11/stl.h | 425 +
 .../include/pybind11/stl/filesystem.h | 116 +
 .../pybind11/include/pybind11/stl_bind.h | 785 +
 .../spconv/third_party/pybind11/noxfile.py | 97 +
 .../third_party/pybind11/pybind11/__init__.py | 16 +
 .../third_party/pybind11/pybind11/__main__.py | 49 +
 .../third_party/pybind11/pybind11/_version.py | 12 +
 .../third_party/pybind11/pybind11/commands.py | 25 +
 .../third_party/pybind11/pybind11/py.typed | 0
 .../pybind11/pybind11/setup_helpers.py | 504 +
 .../third_party/pybind11/pyproject.toml | 61 +
 toolbox/spconv/third_party/pybind11/setup.cfg | 50 +
 toolbox/spconv/third_party/pybind11/setup.py | 149 +
 .../third_party/pybind11/tests/CMakeLists.txt | 558 +
 .../third_party/pybind11/tests/conftest.py | 213 +
 .../pybind11/tests/constructor_stats.h | 322 +
 .../pybind11/tests/cross_module_gil_utils.cpp | 45 +
 ...s_module_interleaved_error_already_set.cpp | 51 +
 .../spconv/third_party/pybind11/tests/env.py | 28 +
 .../tests/extra_python_package/pytest.ini | 0
 .../tests/extra_python_package/test_files.py | 274 +
 .../tests/extra_setuptools/pytest.ini | 0
 .../extra_setuptools/test_setuphelper.py | 151 +
 .../pybind11/tests/local_bindings.h | 92 +
 .../third_party/pybind11/tests/object.h | 205 +
 .../tests/pybind11_cross_module_tests.cpp | 149 +
 .../pybind11/tests/pybind11_tests.cpp | 117 +
 .../pybind11/tests/pybind11_tests.h | 85 +
 .../third_party/pybind11/tests/pytest.ini | 22 +
 .../pybind11/tests/requirements.txt | 9 +
 .../third_party/pybind11/tests/test_async.cpp | 25 +
 .../third_party/pybind11/tests/test_async.py | 24 +
 .../pybind11/tests/test_buffers.cpp | 224 +
 .../pybind11/tests/test_buffers.py | 163 +
 .../pybind11/tests/test_builtin_casters.cpp | 382 +
 .../pybind11/tests/test_builtin_casters.py | 526 +
 .../pybind11/tests/test_call_policies.cpp | 115 +
 .../pybind11/tests/test_call_policies.py | 247 +
 .../pybind11/tests/test_callbacks.cpp | 243 +
 .../pybind11/tests/test_callbacks.py | 195 +
 .../pybind11/tests/test_chrono.cpp | 81 +
 .../third_party/pybind11/tests/test_chrono.py | 209 +
 .../third_party/pybind11/tests/test_class.cpp | 619 +
 .../third_party/pybind11/tests/test_class.py | 471 +
 .../tests/test_cmake_build/CMakeLists.txt | 84 +
 .../pybind11/tests/test_cmake_build/embed.cpp | 23 +
 .../installed_embed/CMakeLists.txt | 28 +
 .../installed_function/CMakeLists.txt | 39 +
 .../installed_target/CMakeLists.txt | 46 +
 .../pybind11/tests/test_cmake_build/main.cpp | 6 +
 .../subdirectory_embed/CMakeLists.txt | 41 +
 .../subdirectory_function/CMakeLists.txt | 35 +
 .../subdirectory_target/CMakeLists.txt | 41 +
 .../pybind11/tests/test_cmake_build/test.py | 8 +
 .../pybind11/tests/test_const_name.cpp | 55 +
 .../pybind11/tests/test_const_name.py | 29 +
 .../tests/test_constants_and_functions.cpp | 159 +
 .../tests/test_constants_and_functions.py | 52 +
 .../pybind11/tests/test_copy_move.cpp | 295 +
 .../pybind11/tests/test_copy_move.py | 132 +
 .../tests/test_custom_type_casters.cpp | 209 +
 .../tests/test_custom_type_casters.py | 120 +
 .../pybind11/tests/test_custom_type_setup.cpp | 41 +
 .../pybind11/tests/test_custom_type_setup.py | 48 +
 .../pybind11/tests/test_docstring_options.cpp | 88 +
 .../pybind11/tests/test_docstring_options.py | 41 +
 .../third_party/pybind11/tests/test_eigen.cpp | 401 +
 .../third_party/pybind11/tests/test_eigen.py | 775 +
 .../pybind11/tests/test_embed/CMakeLists.txt | 47 +
 .../pybind11/tests/test_embed/catch.cpp | 27 +
 .../tests/test_embed/external_module.cpp | 20 +
 .../tests/test_embed/test_interpreter.cpp | 395 +
 .../tests/test_embed/test_interpreter.py | 14 +
 .../tests/test_embed/test_trampoline.py | 16 +
 .../third_party/pybind11/tests/test_enum.cpp | 133 +
 .../third_party/pybind11/tests/test_enum.py | 264 +
 .../third_party/pybind11/tests/test_eval.cpp | 118 +
 .../third_party/pybind11/tests/test_eval.py | 50 +
 .../pybind11/tests/test_eval_call.py | 4 +
 .../pybind11/tests/test_exceptions.cpp | 337 +
 .../pybind11/tests/test_exceptions.h | 13 +
 .../pybind11/tests/test_exceptions.py | 362 +
 .../tests/test_factory_constructors.cpp | 430 +
 .../tests/test_factory_constructors.py | 516 +
 .../pybind11/tests/test_gil_scoped.cpp | 47 +
 .../pybind11/tests/test_gil_scoped.py | 93 +
 .../pybind11/tests/test_iostream.cpp | 126 +
 .../pybind11/tests/test_iostream.py | 295 +
 .../tests/test_kwargs_and_defaults.cpp | 273 +
 .../tests/test_kwargs_and_defaults.py | 390 +
 .../pybind11/tests/test_local_bindings.cpp | 106 +
 .../pybind11/tests/test_local_bindings.py | 256 +
 .../tests/test_methods_and_attributes.cpp | 459 +
 .../tests/test_methods_and_attributes.py | 527 +
 .../pybind11/tests/test_modules.cpp | 125 +
 .../pybind11/tests/test_modules.py | 121 +
 .../tests/test_multiple_inheritance.cpp | 341 +
 .../tests/test_multiple_inheritance.py | 493 +
 .../pybind11/tests/test_numpy_array.cpp | 524 +
 .../pybind11/tests/test_numpy_array.py | 587 +
 .../pybind11/tests/test_numpy_dtypes.cpp | 614 +
 .../pybind11/tests/test_numpy_dtypes.py | 446 +
 .../pybind11/tests/test_numpy_vectorize.cpp | 107 +
 .../pybind11/tests/test_numpy_vectorize.py | 266 +
 .../pybind11/tests/test_opaque_types.cpp | 77 +
 .../pybind11/tests/test_opaque_types.py | 58 +
 .../tests/test_operator_overloading.cpp | 288 +
 .../tests/test_operator_overloading.py | 152 +
 .../pybind11/tests/test_pickling.cpp | 194 +
 .../pybind11/tests/test_pickling.py | 93 +
 .../pybind11/tests/test_pytypes.cpp | 759 +
 .../pybind11/tests/test_pytypes.py | 741 +
 .../tests/test_sequences_and_iterators.cpp | 562 +
 .../tests/test_sequences_and_iterators.py | 243 +
 .../pybind11/tests/test_smart_ptr.cpp | 470 +
 .../pybind11/tests/test_smart_ptr.py | 315 +
 .../third_party/pybind11/tests/test_stl.cpp | 545 +
 .../third_party/pybind11/tests/test_stl.py | 377 +
 .../pybind11/tests/test_stl_binders.cpp | 152 +
 .../pybind11/tests/test_stl_binders.py | 311 +
 .../tests/test_tagbased_polymorphic.cpp | 147 +
 .../tests/test_tagbased_polymorphic.py | 28 +
 .../pybind11/tests/test_thread.cpp | 66 +
 .../third_party/pybind11/tests/test_thread.py | 42 +
 .../third_party/pybind11/tests/test_union.cpp | 22 +
 .../third_party/pybind11/tests/test_union.py | 8 +
 .../pybind11/tests/test_virtual_functions.cpp | 591 +
 .../pybind11/tests/test_virtual_functions.py | 459 +
 .../pybind11/tests/valgrind-numpy-scipy.supp | 140 +
 .../pybind11/tests/valgrind-python.supp | 117 +
 .../pybind11/tools/FindCatch.cmake | 72 +
 .../pybind11/tools/FindEigen3.cmake | 86 +
 .../pybind11/tools/FindPythonLibsNew.cmake | 281 +
 .../third_party/pybind11/tools/check-style.sh | 44 +
 .../pybind11/tools/cmake_uninstall.cmake.in | 23 +
 .../third_party/pybind11/tools/libsize.py | 36 +
 .../pybind11/tools/make_changelog.py | 63 +
 .../pybind11/tools/pybind11Common.cmake | 385 +
 .../pybind11/tools/pybind11Config.cmake.in | 231 +
 .../pybind11/tools/pybind11NewTools.cmake | 254 +
 .../pybind11/tools/pybind11Tools.cmake | 227 +
 .../third_party/pybind11/tools/pyproject.toml | 3 +
 .../pybind11/tools/setup_global.py.in | 59 +
 .../pybind11/tools/setup_main.py.in | 40 +
 572 files changed, 122815 insertions(+)
 create mode 100644 cv/3d_detection/Part-A2-Anchor/README.md
 create mode 100644 cv/3d_detection/Part-A2-Free/README.md
 create mode 100644 toolbox/openpcdet/.gitignore
 create mode 100644 toolbox/openpcdet/LICENSE
 create mode 100644 toolbox/openpcdet/README-ILUVATAR.md
 create mode 100644 toolbox/openpcdet/README.md
 create mode 100644 toolbox/openpcdet/build_openpcdet.sh
 create mode 100644 toolbox/openpcdet/clean_openpcdet.sh
 create mode 100644 toolbox/openpcdet/docker/Dockerfile
 create mode 100644 toolbox/openpcdet/docker/README.md
 create mode 100644 toolbox/openpcdet/docker/cu116.Dockerfile
 create mode 100644 toolbox/openpcdet/docs/CUSTOM_DATASET_TUTORIAL.md
 create mode 100644 toolbox/openpcdet/docs/DEMO.md
 create mode 100644 toolbox/openpcdet/docs/GETTING_STARTED.md
 create mode 100644 toolbox/openpcdet/docs/INSTALL.md
 create mode 100644 toolbox/openpcdet/docs/changelog.md
 create mode 100644 toolbox/openpcdet/docs/dataset_vs_model.png
 create mode 100644 toolbox/openpcdet/docs/demo.png
 create mode 100644 toolbox/openpcdet/docs/guidelines_of_approaches/bevfusion.md
 create mode 100644 toolbox/openpcdet/docs/guidelines_of_approaches/mppnet.md
 create mode 100644 toolbox/openpcdet/docs/model_framework.png
 create mode 100644 toolbox/openpcdet/docs/multiple_models_demo.png
 create mode 100644 toolbox/openpcdet/docs/open_mmlab.png
 create mode 100644 toolbox/openpcdet/install_openpcdet.sh
 create mode 100644 toolbox/openpcdet/pcdet/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/config.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/argo2/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/argo2/argo2_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/constants.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/so3.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/augmentor/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/augmentor/augmentor_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/augmentor/data_augmentor.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/augmentor/database_sampler.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/custom/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/custom/custom_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/LICENSE
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/README.md
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/eval.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/kitti/kitti_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/lyft/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/lyft/lyft_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/lyft_eval.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/lyft/lyft_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/nuscenes/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/once/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/once/once_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/once/once_eval/eval_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/once/once_eval/evaluation.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/once/once_eval/iou_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/once/once_toolkits.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/pandaset/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/pandaset/pandaset_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/processor/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/processor/data_processor.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/processor/point_feature_encoder.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/waymo/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/waymo/waymo_dataset.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/waymo/waymo_eval.py
 create mode 100644 toolbox/openpcdet/pcdet/datasets/waymo/waymo_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/base_bev_backbone.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/fuser/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/fuser/convfuser.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/height_compression.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/dsvt.py
 create mode 100755 toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/basic_blocks.py
 create mode 100755 toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/pyramid_ffn.py
 create mode 100755 toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/sem_deeplabv3.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_conv.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/pfe/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/pointnet2_backbone.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_2d.py
 create mode 100755 toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_focal.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext2d.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/spconv_unet.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_voxel_vfe.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/mean_vfe.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/pillar_vfe.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_3d/vfe/vfe_template.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_image/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_image/img_neck/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_image/img_neck/generalized_lss.py
 create mode 100644 toolbox/openpcdet/pcdet/models/backbones_image/swin.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_multi.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_single.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_template.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/center_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/point_head_box.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/point_head_simple.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/point_head_template.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/point_intra_part_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/anchor_generator.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/hungarian_assigner.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/transfusion_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/dense_heads/voxelnext_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/PartA2_net.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/bevfusion.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/caddn.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/centerpoint.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/detector3d_template.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/mppnet.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/mppnet_e2e.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/pillarnet.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/point_rcnn.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/pointpillar.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/pv_rcnn.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/pv_rcnn_plusplus.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/second_net.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/second_net_iou.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/transfusion.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/voxel_rcnn.py
 create mode 100644 toolbox/openpcdet/pcdet/models/detectors/voxelnext.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/basic_block_2d.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/centernet_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/dsvt_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/model_nms_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/mppnet_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/swin_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/model_utils/transfusion_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/mppnet_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/mppnet_memory_bank_e2e.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/partA2_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/pointrcnn_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/pvrcnn_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/roi_head_template.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/second_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py
 create mode 100644 toolbox/openpcdet/pcdet/models/roi_heads/voxelrcnn_head.py
 create mode 100644 toolbox/openpcdet/pcdet/models/view_transforms/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/models/view_transforms/depth_lss.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/bev_pool/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/bev_pool/bev_pool.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool_cuda.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/ingroup_inds/ingroup_inds_op.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/ingroup_inds/src/error.cuh
 create mode 100644 toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds_kernel.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/iou3d_nms_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/roiaware_pool3d/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu
 create mode 100644 toolbox/openpcdet/pcdet/ops/roipoint_pool3d/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp
 create mode 100644 toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu
 create mode 100644 toolbox/openpcdet/pcdet/utils/__init__.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/box_coder_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/box_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/calibration_kitti.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/common_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/commu_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/loss_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/object3d_custom.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/object3d_kitti.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/spconv_utils.py
 create mode 100644 toolbox/openpcdet/pcdet/utils/transform_utils.py
 create mode 100644 toolbox/openpcdet/requirements.txt
 create mode 100644 toolbox/openpcdet/setup.py
 create mode 100644 toolbox/openpcdet/tools/_init_path.py
 create mode 100644 toolbox/openpcdet/tools/demo.py
 create mode 100644 toolbox/openpcdet/tools/eval_utils/eval_utils.py
 create mode 100644 toolbox/openpcdet/tools/process_tools/create_integrated_database.py
 create mode 100755 toolbox/openpcdet/tools/scripts/slurm_test_mgpu.sh
 create mode 100644 toolbox/openpcdet/tools/scripts/slurm_test_single.sh
 create mode 100644 toolbox/openpcdet/tools/scripts/slurm_train.sh
 create mode 100644 toolbox/openpcdet/tools/scripts/slurm_train_v2.sh
 create mode 100644 toolbox/openpcdet/tools/scripts/torch_train.sh
 create mode 100644 toolbox/openpcdet/tools/test.py
 create mode 100644 toolbox/openpcdet/tools/train.py
 create mode 100644 toolbox/openpcdet/tools/train_utils/optimization/__init__.py
 create mode 100644 toolbox/openpcdet/tools/train_utils/optimization/fastai_optim.py
 create mode 100644 toolbox/openpcdet/tools/train_utils/optimization/learning_schedules_fastai.py
 create mode 100644 toolbox/openpcdet/tools/train_utils/train_utils.py
 create mode 100644 toolbox/openpcdet/tools/visual_utils/open3d_vis_utils.py
 create mode 100644 toolbox/openpcdet/tools/visual_utils/visualize_utils.py
 create mode 100644 toolbox/spconv/.gitignore
 create mode 100644 toolbox/spconv/.gitmodules
 create mode 100644 toolbox/spconv/CMakeLists.txt
 create mode 100644 toolbox/spconv/LICENSE
 create mode 100644 toolbox/spconv/README-ILUVATAR.md
 create mode 100644 toolbox/spconv/README.md
 create mode 100644 toolbox/spconv/build_spconv.sh
 create mode 100644 toolbox/spconv/clean_spconv.sh
 create mode 100644 toolbox/spconv/include/paramsgrid.h
 create mode 100644 toolbox/spconv/include/prettyprint.h
 create mode 100644 toolbox/spconv/include/pybind11_utils.h
 create mode 100644 toolbox/spconv/include/spconv/box_iou.h
 create mode 100644 toolbox/spconv/include/spconv/geometry.h
 create mode 100644 toolbox/spconv/include/spconv/indice.cu.h
 create mode 100644 toolbox/spconv/include/spconv/indice.h
 create mode 100644 toolbox/spconv/include/spconv/maxpool.h
 create mode 100644 toolbox/spconv/include/spconv/mp_helper.h
 create mode 100644 toolbox/spconv/include/spconv/nms.h
 create mode 100644 toolbox/spconv/include/spconv/nms_gpu.h
 create mode 100644 toolbox/spconv/include/spconv/point2voxel.h
 create mode 100644 toolbox/spconv/include/spconv/pool_ops.h
 create mode 100644 toolbox/spconv/include/spconv/reordering.cu.h
 create mode 100644 toolbox/spconv/include/spconv/reordering.h
 create mode 100644 toolbox/spconv/include/spconv/spconv_ops.h
 create mode 100644 toolbox/spconv/include/tensorview/helper_kernel.cu.h
 create mode 100644 toolbox/spconv/include/tensorview/helper_launch.h
 create mode 100644 toolbox/spconv/include/tensorview/tensorview.h
 create mode 100644 toolbox/spconv/include/torch_utils.h
 create mode 100644 toolbox/spconv/include/utility/timer.h
 create mode 100644 toolbox/spconv/install_spconv.sh
 create mode 100644 toolbox/spconv/setup.py
 create mode 100644 toolbox/spconv/spconv/__init__.py
 create mode 100644 toolbox/spconv/spconv/conv.py
 create mode 100644 toolbox/spconv/spconv/functional.py
 create mode 100644 toolbox/spconv/spconv/modules.py
 create mode 100644 toolbox/spconv/spconv/ops.py
 create mode 100644 toolbox/spconv/spconv/pool.py
 create mode 100644 toolbox/spconv/spconv/test_utils.py
 create mode 100644 toolbox/spconv/spconv/utils/__init__.py
 create mode 100644 toolbox/spconv/src/spconv/CMakeLists.txt
 create mode 100644 toolbox/spconv/src/spconv/all.cc
 create mode 100644 toolbox/spconv/src/spconv/indice.cc
 create mode 100644 toolbox/spconv/src/spconv/indice.cu
 create mode 100644 toolbox/spconv/src/spconv/maxpool.cc
 create mode 100644 toolbox/spconv/src/spconv/maxpool.cu
 create mode 100644 toolbox/spconv/src/spconv/reordering.cc
 create mode 100644 toolbox/spconv/src/spconv/reordering.cu
 create mode 100644 toolbox/spconv/src/utils/CMakeLists.txt
 create mode 100644 toolbox/spconv/src/utils/all.cc
 create mode 100644 toolbox/spconv/src/utils/nms.cu
 create mode 100644 toolbox/spconv/test/CMakeLists.txt
 create mode 100644 toolbox/spconv/test/src/catch_main.cpp
 create mode 100644 toolbox/spconv/test/src/test_conv_rule.cpp
 create mode 100644 toolbox/spconv/test/test_SparseConv2d.py
 create mode 100644 toolbox/spconv/test/test_SparseConv3d.py
 create mode 100644 toolbox/spconv/test/test_SparseConvTensor.py
 create mode 100644 toolbox/spconv/test/test_SparseInverseConv2d.py
 create mode 100644 toolbox/spconv/test/test_SparseInverseConv3d.py
 create mode 100644 toolbox/spconv/test/test_SubMConv2d.py
 create mode 100644 toolbox/spconv/test/test_SubMConv3d.py
 create mode 100644 toolbox/spconv/test/test_conv.py
 create mode 100644 toolbox/spconv/third_party/catch2/catch.hpp
 create mode 100644 toolbox/spconv/third_party/pybind11/.appveyor.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.clang-format
 create mode 100644 toolbox/spconv/third_party/pybind11/.clang-tidy
 create mode 100644 toolbox/spconv/third_party/pybind11/.cmake-format.yaml
 create mode 100644 toolbox/spconv/third_party/pybind11/.gitattributes
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/CODEOWNERS
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/CONTRIBUTING.md
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/ISSUE_TEMPLATE/bug-report.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/ISSUE_TEMPLATE/config.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/dependabot.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/labeler.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/labeler_merged.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/matchers/pylint.json
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/pull_request_template.md
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/workflows/ci.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/workflows/configure.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/workflows/format.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/workflows/labeler.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/workflows/pip.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.github/workflows/upstream.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/.gitignore
 create mode 100644 toolbox/spconv/third_party/pybind11/.pre-commit-config.yaml
 create mode 100644 toolbox/spconv/third_party/pybind11/.readthedocs.yml
 create mode 100644 toolbox/spconv/third_party/pybind11/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/LICENSE
 create mode 100644 toolbox/spconv/third_party/pybind11/MANIFEST.in
 create mode 100644 toolbox/spconv/third_party/pybind11/README.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/Doxyfile
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/_static/css/custom.css
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/chrono.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/custom.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/eigen.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/functional.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/index.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/overview.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/stl.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/cast/strings.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/classes.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/embedding.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/exceptions.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/functions.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/misc.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/pycpp/index.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/pycpp/numpy.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/pycpp/object.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/pycpp/utilities.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/advanced/smart_ptrs.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/basics.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/benchmark.py
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/benchmark.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/changelog.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/classes.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/cmake/index.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/compiling.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/conf.py
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/faq.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/index.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/installing.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/limitations.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/pybind11-logo.png
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/pybind11_vs_boost_python1.png
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/pybind11_vs_boost_python1.svg
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/pybind11_vs_boost_python2.png
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/pybind11_vs_boost_python2.svg
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/reference.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/release.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/requirements.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/docs/upgrade.rst
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/attr.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/buffer_info.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/cast.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/chrono.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/common.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/complex.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/class.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/common.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/descr.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/init.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/internals.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/type_caster_base.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/detail/typeid.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/eigen.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/embed.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/eval.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/functional.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/gil.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/iostream.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/numpy.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/operators.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/options.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/pybind11.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/pytypes.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/stl.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/stl/filesystem.h
 create mode 100644 toolbox/spconv/third_party/pybind11/include/pybind11/stl_bind.h
 create mode 100644 toolbox/spconv/third_party/pybind11/noxfile.py
 create mode 100644 toolbox/spconv/third_party/pybind11/pybind11/__init__.py
 create mode 100644 toolbox/spconv/third_party/pybind11/pybind11/__main__.py
 create mode 100644 toolbox/spconv/third_party/pybind11/pybind11/_version.py
 create mode 100644 toolbox/spconv/third_party/pybind11/pybind11/commands.py
 create mode 100644 toolbox/spconv/third_party/pybind11/pybind11/py.typed
 create mode 100644 toolbox/spconv/third_party/pybind11/pybind11/setup_helpers.py
 create mode 100644 toolbox/spconv/third_party/pybind11/pyproject.toml
 create mode 100644 toolbox/spconv/third_party/pybind11/setup.cfg
 create mode 100644 toolbox/spconv/third_party/pybind11/setup.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/conftest.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/constructor_stats.h
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/cross_module_gil_utils.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/cross_module_interleaved_error_already_set.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/env.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/extra_python_package/pytest.ini
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/extra_python_package/test_files.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/extra_setuptools/pytest.ini
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/extra_setuptools/test_setuphelper.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/local_bindings.h
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/object.h
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/pybind11_cross_module_tests.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/pybind11_tests.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/pybind11_tests.h
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/pytest.ini
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/requirements.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_async.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_async.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_buffers.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_buffers.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_builtin_casters.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_builtin_casters.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_call_policies.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_call_policies.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_callbacks.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_callbacks.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_chrono.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_chrono.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_class.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_class.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/embed.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/installed_embed/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/installed_function/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/installed_target/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/main.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/subdirectory_embed/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/subdirectory_function/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/subdirectory_target/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_cmake_build/test.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_const_name.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_const_name.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_constants_and_functions.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_constants_and_functions.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_copy_move.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_copy_move.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_custom_type_casters.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_custom_type_casters.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_custom_type_setup.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_custom_type_setup.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_docstring_options.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_docstring_options.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_eigen.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_eigen.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_embed/CMakeLists.txt
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_embed/catch.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_embed/external_module.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_embed/test_interpreter.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_embed/test_interpreter.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_embed/test_trampoline.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_enum.cpp
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_enum.py
 create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_eval.cpp
 create mode 100644
toolbox/spconv/third_party/pybind11/tests/test_eval.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_eval_call.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_exceptions.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_exceptions.h create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_exceptions.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_factory_constructors.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_factory_constructors.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_gil_scoped.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_gil_scoped.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_iostream.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_iostream.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_kwargs_and_defaults.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_kwargs_and_defaults.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_local_bindings.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_local_bindings.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_methods_and_attributes.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_methods_and_attributes.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_modules.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_modules.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_multiple_inheritance.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_multiple_inheritance.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_numpy_array.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_numpy_array.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_numpy_dtypes.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_numpy_dtypes.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_numpy_vectorize.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_numpy_vectorize.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_opaque_types.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_opaque_types.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_operator_overloading.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_operator_overloading.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_pickling.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_pickling.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_pytypes.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_pytypes.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_sequences_and_iterators.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_sequences_and_iterators.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_smart_ptr.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_smart_ptr.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_stl.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_stl.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_stl_binders.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_stl_binders.py 
create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_tagbased_polymorphic.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_tagbased_polymorphic.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_thread.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_thread.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_union.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_union.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_virtual_functions.cpp create mode 100644 toolbox/spconv/third_party/pybind11/tests/test_virtual_functions.py create mode 100644 toolbox/spconv/third_party/pybind11/tests/valgrind-numpy-scipy.supp create mode 100644 toolbox/spconv/third_party/pybind11/tests/valgrind-python.supp create mode 100644 toolbox/spconv/third_party/pybind11/tools/FindCatch.cmake create mode 100644 toolbox/spconv/third_party/pybind11/tools/FindEigen3.cmake create mode 100644 toolbox/spconv/third_party/pybind11/tools/FindPythonLibsNew.cmake create mode 100755 toolbox/spconv/third_party/pybind11/tools/check-style.sh create mode 100644 toolbox/spconv/third_party/pybind11/tools/cmake_uninstall.cmake.in create mode 100644 toolbox/spconv/third_party/pybind11/tools/libsize.py create mode 100755 toolbox/spconv/third_party/pybind11/tools/make_changelog.py create mode 100644 toolbox/spconv/third_party/pybind11/tools/pybind11Common.cmake create mode 100644 toolbox/spconv/third_party/pybind11/tools/pybind11Config.cmake.in create mode 100644 toolbox/spconv/third_party/pybind11/tools/pybind11NewTools.cmake create mode 100644 toolbox/spconv/third_party/pybind11/tools/pybind11Tools.cmake create mode 100644 toolbox/spconv/third_party/pybind11/tools/pyproject.toml create mode 100644 toolbox/spconv/third_party/pybind11/tools/setup_global.py.in create mode 100644 toolbox/spconv/third_party/pybind11/tools/setup_main.py.in diff --git a/cv/3d_detection/Part-A2-Anchor/README.md b/cv/3d_detection/Part-A2-Anchor/README.md new file mode 100644 index 000000000..d247d9ff1 --- /dev/null +++ b/cv/3d_detection/Part-A2-Anchor/README.md @@ -0,0 +1,61 @@ +# Part-A2-Anchor + +## Model description +3D object detection from LiDAR point cloud is a challenging problem in 3D scene understanding and has many practical applications. In this paper, we extend our preliminary work PointRCNN to a novel and strong point-cloud-based 3D object detection framework, the part-aware and aggregation neural network (Part-A2 net). The whole framework consists of the part-aware stage and the part-aggregation stage. Firstly, the part-aware stage for the first time fully utilizes free-of-charge part supervisions derived from 3D ground-truth boxes to simultaneously predict high quality 3D proposals and accurate intra-object part locations. The predicted intra-object part locations within the same proposal are grouped by our new-designed RoI-aware point cloud pooling module, which results in an effective representation to encode the geometry-specific features of each 3D proposal. Then the part-aggregation stage learns to re-score the box and refine the box location by exploring the spatial relationship of the pooled intra-object part locations. Extensive experiments are conducted to demonstrate the performance improvements from each component of our proposed framework. 
Our Part-A2 net outperforms all existing 3D detection methods and achieves new state-of-the-art results on the KITTI 3D object detection dataset by utilizing only the LiDAR point cloud data.
+
+## Step 1: Installation
+```
+# Install libGL and libboost
+yum install mesa-libGL
+yum install boost-devel
+
+# Install spconv
+cd toolbox/spconv
+bash clean_spconv.sh
+bash build_spconv.sh
+bash install_spconv.sh
+
+# Install openpcdet
+cd toolbox/openpcdet
+pip3 install -r requirements.txt
+bash build_openpcdet.sh
+bash install_openpcdet.sh
+```
+
+## Step 2: Preparing datasets
+Download the KITTI dataset from
+
+Download the "planes" subdataset from
+```
+OpenPCDet
+├── data
+│   ├── kitti
+│   │   │── ImageSets
+│   │   │── training
+│   │   │   ├──calib & velodyne & label_2 & image_2 & (optional: planes) & (optional: depth_2)
+│   │   │── testing
+│   │   │   ├──calib & velodyne & image_2
+├── pcdet
+├── tools
+```
+
+```
+# Modify the `DATA_PATH` in kitti_dataset.yaml to your own data path
+cd toolbox/openpcdet
+python3 -m pcdet.datasets.kitti.kitti_dataset create_kitti_infos tools/cfgs/dataset_configs/kitti_dataset.yaml
+```
+
+## Step 3: Training
+
+### Single GPU training
+
+```bash
+cd tools
+python3 train.py --cfg_file cfgs/kitti_models/PartA2.yaml
+```
+
+### Multiple GPU training
+
+```bash
+bash scripts/dist_train.sh 16 --cfg_file cfgs/kitti_models/PartA2.yaml
+```
\ No newline at end of file
diff --git a/cv/3d_detection/Part-A2-Free/README.md b/cv/3d_detection/Part-A2-Free/README.md
new file mode 100644
index 000000000..3e2b31d97
--- /dev/null
+++ b/cv/3d_detection/Part-A2-Free/README.md
@@ -0,0 +1,61 @@
+# Part-A2-Free
+
+## Model description
+In this work, we propose the part-aware and aggregation neural network (PartA2-Net). The whole framework consists of the part-aware stage and the part-aggregation stage. Firstly, the part-aware stage for the first time fully utilizes free-of-charge part supervisions derived from 3D ground-truth boxes to simultaneously predict high-quality 3D proposals and accurate intra-object part locations. The predicted intra-object part locations within the same proposal are grouped by our newly designed RoI-aware point cloud pooling module, which results in an effective representation to encode the geometry-specific features of each 3D proposal. Then the part-aggregation stage learns to re-score the box and refine the box location by exploring the spatial relationship of the pooled intra-object part locations. At the time of submission (July 9, 2019), our PartA2-Net outperforms all existing 3D detection methods and achieves new state-of-the-art results on the KITTI 3D object detection leaderboard by utilizing only the LiDAR point cloud data.
+ +## Step 1: Installation +``` +## install libGL and libboost +yum install mesa-libGL +yum install boost-devel + +# Install spconv +cd toolbox/spconv +bash clean_spconv.sh +bash build_spconv.sh +bash install_spconv.sh + +# Install openpcdet +cd toolbox/openpcdet +pip3 install -r requirements.txt +bash build_openpcdet.sh +bash install_openpcdet.sh +``` + +## Step 2: Preparing datasets +Download the kitti dataset from + +Download the "planes" subdataset from +``` +OpenPCDet +├── data +│ ├── kitti +│ │ │── ImageSets +│ │ │── training +│ │ │ ├──calib & velodyne & label_2 & image_2 & (optional: planes) & (optional: depth_2) +│ │ │── testing +│ │ │ ├──calib & velodyne & image_2 +├── pcdet +├── tools +``` + +``` +# Modify the `DATA_PATH` in the kitti_dataset.yaml to your own +cd toolbox/openpcdet +python3 -m pcdet.datasets.kitti.kitti_dataset create_kitti_infos tools/cfgs/dataset_configs/kitti_dataset.yaml +``` + +## Step 3: Training + +### Single GPU training + +```bash +cd tools +python3 train.py --cfg_file cfgs/kitti_models/PartA2_free.yaml +``` + +### Multiple GPU training + +```bash +bash scripts/dist_train.sh 16 --cfg_file cfgs/kitti_models/PartA2_free.yaml +``` \ No newline at end of file diff --git a/toolbox/openpcdet/.gitignore b/toolbox/openpcdet/.gitignore new file mode 100644 index 000000000..c74eff803 --- /dev/null +++ b/toolbox/openpcdet/.gitignore @@ -0,0 +1,31 @@ +**__pycache__** +**egg-info** +**dist** +data/ +*.pyc +venv/ +*.idea/ +*.so +*.yaml +*.pth +*.pkl +*.zip +*.bin +output +version.py +build/ +build_pip/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg diff --git a/toolbox/openpcdet/LICENSE b/toolbox/openpcdet/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/toolbox/openpcdet/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/toolbox/openpcdet/README-ILUVATAR.md b/toolbox/openpcdet/README-ILUVATAR.md new file mode 100644 index 000000000..f70f5f2de --- /dev/null +++ b/toolbox/openpcdet/README-ILUVATAR.md @@ -0,0 +1,6 @@ +# install +``` +pip3 install -r requirements.txt +bash build_openpcdet.sh +bash install_openpcdet.sh +``` \ No newline at end of file diff --git a/toolbox/openpcdet/README.md b/toolbox/openpcdet/README.md new file mode 100644 index 000000000..779571acb --- /dev/null +++ b/toolbox/openpcdet/README.md @@ -0,0 +1,291 @@ + + +# OpenPCDet + +`OpenPCDet` is a clear, simple, self-contained open source project for LiDAR-based 3D object detection. + +It is also the official code release of [`[PointRCNN]`](https://arxiv.org/abs/1812.04244), [`[Part-A2-Net]`](https://arxiv.org/abs/1907.03670), [`[PV-RCNN]`](https://arxiv.org/abs/1912.13192), [`[Voxel R-CNN]`](https://arxiv.org/abs/2012.15712), [`[PV-RCNN++]`](https://arxiv.org/abs/2102.00463) and [`[MPPNet]`](https://arxiv.org/abs/2205.05979). 
+
+**Highlights**:
+* `OpenPCDet` has been updated to `v0.6.0` (Sep. 2022).
+* The code of PV-RCNN++ is now supported.
+* The code of MPPNet is now supported.
+* The multi-modal 3D detection approaches on Nuscenes are now supported.
+
+## Overview
+- [Changelog](#changelog)
+- [Design Pattern](#openpcdet-design-pattern)
+- [Model Zoo](#model-zoo)
+- [Installation](docs/INSTALL.md)
+- [Quick Demo](docs/DEMO.md)
+- [Getting Started](docs/GETTING_STARTED.md)
+- [Citation](#citation)
+
+
+## Changelog
+[2023-06-30] **NEW:** Added support for [`DSVT`](https://arxiv.org/abs/2301.06051), which achieves state-of-the-art performance on the large-scale Waymo Open Dataset with real-time inference speed (27 Hz with TensorRT).
+
+[2023-05-13] **NEW:** Added support for the multi-modal 3D object detection models on the Nuscenes dataset.
+* Support multi-modal Nuscenes detection (see the [GETTING_STARTED.md](docs/GETTING_STARTED.md) to process data).
+* Support the [TransFusion-Lidar](https://arxiv.org/abs/2203.11496) head, which achieves 69.43% NDS on the Nuscenes validation dataset.
+* Support [`BEVFusion`](https://arxiv.org/abs/2205.13542), which fuses multi-modal information in BEV space and reaches 70.98% NDS on the Nuscenes validation dataset (see the [guideline](docs/guidelines_of_approaches/bevfusion.md) on how to train/test with BEVFusion).
+
+[2023-04-02] Added support for [`VoxelNeXt`](https://arxiv.org/abs/2303.11301) on the Nuscenes, Waymo, and Argoverse2 datasets. It is a fully sparse 3D object detection network built from clean sparse CNNs that predicts 3D objects directly upon voxels.
+
+[2022-09-02] **NEW:** Update `OpenPCDet` to v0.6.0:
+* Official code release of [`MPPNet`](https://arxiv.org/abs/2205.05979) for temporal 3D object detection, which supports long-term multi-frame 3D object detection and ranked 1st on the [3D detection leaderboard](https://waymo.com/open/challenges/2020/3d-detection) of the Waymo Open Dataset as of Sept. 2nd, 2022. On the validation set, MPPNet achieves 74.96%, 75.06% and 74.52% for the vehicle, pedestrian and cyclist classes in terms of mAPH@Level_2 (see the [guideline](docs/guidelines_of_approaches/mppnet.md) on how to train/test with MPPNet).
+* Support multi-frame training/testing on the Waymo Open Dataset (see the [change log](docs/changelog.md) for more details on how to process data).
+* Support saving training details (e.g., loss, iter, epoch) to file (the previous tqdm progress bar is still supported by using `--use_tqdm_to_record`). Please use `pip install gpustat` if you also want to log the GPU-related information.
+* Support saving the latest model every 5 minutes, so you can resume training from the latest status instead of the previous epoch.
+
+[2022-08-22] Added support for a [custom dataset tutorial and template](docs/CUSTOM_DATASET_TUTORIAL.md).
+
+[2022-07-05] Added support for the 3D object detection backbone network [`Focals Conv`](https://openaccess.thecvf.com/content/CVPR2022/papers/Chen_Focal_Sparse_Convolutional_Networks_for_3D_Object_Detection_CVPR_2022_paper.pdf).
+
+[2022-02-12] Added support for using docker. Please refer to the guidance in [./docker](./docker).
+
+[2022-02-07] Added support for Centerpoint models on the Nuscenes dataset.
+
+[2022-01-14] Added support for dynamic pillar voxelization, following the implementation proposed in [`H^23D R-CNN`](https://arxiv.org/abs/2107.14391) with a unique operation and the [`torch_scatter`](https://github.com/rusty1s/pytorch_scatter) package.
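+
+For intuition, the core of dynamic voxelization is a unique op over integer voxel coordinates followed by a scatter reduction. The following is a minimal, illustrative sketch only (the tensors and sizes are made up; the actual implementation lives in pcdet's dynamic VFE modules):
+
+```python
+import torch
+from torch_scatter import scatter_mean
+
+points = torch.rand(10000, 4)                 # [x, y, z, intensity]
+voxel_size = torch.tensor([0.16, 0.16, 4.0])  # pillar-style voxels
+pc_range_min = torch.tensor([0.0, 0.0, 0.0])
+
+# Integer voxel coordinate of every point.
+coords = ((points[:, :3] - pc_range_min) / voxel_size).long()
+
+# "Dynamic" voxelization: no fixed max-points-per-voxel buffer; every point
+# is kept and mapped to its voxel via the inverse indices of torch.unique.
+unique_coords, inverse = torch.unique(coords, dim=0, return_inverse=True)
+
+# Per-voxel mean features via scatter.
+voxel_features = scatter_mean(points, inverse, dim=0)
+```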
+
+[2022-01-05] **NEW:** Update `OpenPCDet` to v0.5.2:
+* The code of [`PV-RCNN++`](https://arxiv.org/abs/2102.00463) has been released to this repo, with higher performance, faster training/inference speed and less memory consumption than PV-RCNN.
+* Add the performance of several models trained with the full training set of the [Waymo Open Dataset](#waymo-open-dataset-baselines).
+* Support the Lyft dataset, see the pull request [here](https://github.com/open-mmlab/OpenPCDet/pull/720).
+
+
+[2021-12-09] **NEW:** Update `OpenPCDet` to v0.5.1:
+* Add PointPillar-related baseline configs/results on the [Waymo Open Dataset](#waymo-open-dataset-baselines).
+* Support the Pandaset dataloader, see the pull request [here](https://github.com/open-mmlab/OpenPCDet/pull/396).
+* Support a set of new augmentations, see the pull request [here](https://github.com/open-mmlab/OpenPCDet/pull/653).
+
+[2021-12-01] **NEW:** `OpenPCDet` v0.5.0 is released with the following features:
+* Improve the performance of all models on the [Waymo Open Dataset](#waymo-open-dataset-baselines). Note that you need to re-prepare the training/validation data and ground-truth database of the Waymo Open Dataset (see [GETTING_STARTED.md](docs/GETTING_STARTED.md)).
+* Support the anchor-free [CenterHead](pcdet/models/dense_heads/center_head.py), and add configs of `CenterPoint` and `PV-RCNN with CenterHead`.
+* Support the latest **PyTorch 1.1~1.10** and **spconv 1.0~2.x**, where **spconv 2.x** should be easy to install with pip and faster than the previous version (see the official update of spconv [here](https://github.com/traveller59/spconv)).
+* Support the config [`USE_SHARED_MEMORY`](tools/cfgs/dataset_configs/waymo_dataset.yaml) to use shared memory to potentially speed up the training process in case you suffer from an IO problem.
+* Support a better and faster [visualization script](tools/visual_utils/open3d_vis_utils.py); you need to install [Open3D](https://github.com/isl-org/Open3D) first.
+
+[2021-06-08] Added support for the voxel-based 3D object detection model [`Voxel R-CNN`](#KITTI-3D-Object-Detection-Baselines).
+
+[2021-05-14] Added support for the monocular 3D object detection model [`CaDDN`](#KITTI-3D-Object-Detection-Baselines).
+
+[2020-11-27] Bug fixed: Please re-prepare the validation infos of the Waymo dataset (version 1.2) if you would like to
+use our provided Waymo evaluation tool (see [PR](https://github.com/open-mmlab/OpenPCDet/pull/383)).
+Note that you do not need to re-prepare the training data and ground-truth database.
+
+[2020-11-10] The [Waymo Open Dataset](#waymo-open-dataset-baselines) has been supported with state-of-the-art results. Currently we provide the
+configs and results of `SECOND`, `PartA2` and `PV-RCNN` on the Waymo Open Dataset, and more models could be easily supported by modifying their dataset configs.
+
+[2020-08-10] Bug fixed: The provided NuScenes models have been updated to fix the loading bugs. Please re-download them if you need to use the pretrained NuScenes models.
+
+[2020-07-30] `OpenPCDet` v0.3.0 is released with the following features:
+ * The point-based and anchor-free models ([`PointRCNN`](#KITTI-3D-Object-Detection-Baselines), [`PartA2-Free`](#KITTI-3D-Object-Detection-Baselines)) are supported now.
+ * The NuScenes dataset is supported with strong baseline results ([`SECOND-MultiHead (CBGS)`](#NuScenes-3D-Object-Detection-Baselines) and [`PointPillar-MultiHead`](#NuScenes-3D-Object-Detection-Baselines)).
+ * Higher efficiency than the last version; supports **PyTorch 1.1~1.7** and **spconv 1.0~1.2** simultaneously.
+
+[2020-07-17] Add simple visualization codes and a quick demo to test with custom data.
+
+[2020-06-24] `OpenPCDet` v0.2.0 is released with a brand-new structure to support more models and datasets.
+
+[2020-03-16] `OpenPCDet` v0.1.0 is released.
+
+
+## Introduction
+
+
+### What does `OpenPCDet` toolbox do?
+
+Note that we have upgraded `PCDet` from `v0.1` to `v0.2` with a brand-new structure to support various datasets and models.
+
+`OpenPCDet` is a general PyTorch-based codebase for 3D object detection from point clouds.
+It currently supports multiple state-of-the-art 3D object detection methods with highly refactored codes for both one-stage and two-stage 3D detection frameworks.
+
+Based on the `OpenPCDet` toolbox, we won the [3D Detection](https://waymo.com/open/challenges/3d-detection/),
+[3D Tracking](https://waymo.com/open/challenges/3d-tracking/) and [Domain Adaptation](https://waymo.com/open/challenges/domain-adaptation/)
+tracks of the Waymo Open Dataset challenges among all LiDAR-only methods, and the Waymo-related models will be released to `OpenPCDet` soon.
+
+We are actively updating this repo, and more datasets and models will be supported soon.
+Contributions are also welcome.
+
+### `OpenPCDet` design pattern
+
+* Data-Model separation with unified point cloud coordinates for easily extending to custom datasets.
+
+* Unified 3D box definition: (x, y, z, dx, dy, dz, heading); see the sketch after this list.
+
+* Flexible and clear model structure to easily support various 3D detection models.
+
+* Support various models within one framework.
+
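+To make the unified box definition above concrete, here is a minimal sketch (illustrative only, with made-up values; it is not code from the repo):
+
+```python
+import numpy as np
+
+# One box in the unified OpenPCDet layout: (x, y, z) is the box center in
+# the unified LiDAR frame (x forward, y left, z up), (dx, dy, dz) are the
+# box sizes along its local axes, and heading is the rotation around the
+# z-axis (counter-clockwise, with 0 pointing along +x).
+box = np.array([10.0, -2.5, 0.8, 3.9, 1.6, 1.56, 0.3])
+
+x, y, z, dx, dy, dz, heading = box
+bev_radius = np.hypot(dx, dy) / 2  # e.g., a coarse radius for range checks
+```
+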
+
+
+### Currently Supported Features
+
+- [x] Support both one-stage and two-stage 3D object detection frameworks
+- [x] Support distributed training & testing with multiple GPUs and multiple machines
+- [x] Support multiple heads on different scales to detect different classes
+- [x] Support stacked version set abstraction to encode variable numbers of points in different scenes
+- [x] Support Adaptive Training Sample Selection (ATSS) for target assignment
+- [x] Support GPU version 3D IoU calculation and rotated NMS
+- [x] Support RoI-aware point cloud pooling & RoI-grid point cloud pooling
+
+
+## Model Zoo
+
+### KITTI 3D Object Detection Baselines
+Selected supported methods are shown in the table below. The results are the 3D detection performance of moderate difficulty on the *val* set of the KITTI dataset.
+* All LiDAR-based models are trained with 8 GTX 1080Ti GPUs and are available for download.
+* The training time is measured with 8 TITAN XP GPUs and PyTorch 1.5.
+
+| | training time | Car@R11 | Pedestrian@R11 | Cyclist@R11 | download |
+|---------------------------------------------|----------:|:-------:|:-------:|:-------:|:---------:|
+| [PointPillar](tools/cfgs/kitti_models/pointpillar.yaml) |~1.2 hours| 77.28 | 52.29 | 62.68 | [model-18M](https://drive.google.com/file/d/1wMxWTpU1qUoY3DsCH31WJmvJxcjFXKlm/view?usp=sharing) |
+| [SECOND](tools/cfgs/kitti_models/second.yaml) | ~1.7 hours | 78.62 | 52.98 | 67.15 | [model-20M](https://drive.google.com/file/d/1-01zsPOsqanZQqIIyy7FpNXStL3y4jdR/view?usp=sharing) |
+| [SECOND-IoU](tools/cfgs/kitti_models/second_iou.yaml) | - | 79.09 | 55.74 | 71.31 | [model-46M](https://drive.google.com/file/d/1AQkeNs4bxhvhDQ-5sEo_yvQUlfo73lsW/view?usp=sharing) |
+| [PointRCNN](tools/cfgs/kitti_models/pointrcnn.yaml) | ~3 hours | 78.70 | 54.41 | 72.11 | [model-16M](https://drive.google.com/file/d/1BCX9wMn-GYAfSOPpyxf6Iv6fc0qKLSiU/view?usp=sharing)|
+| [PointRCNN-IoU](tools/cfgs/kitti_models/pointrcnn_iou.yaml) | ~3 hours | 78.75 | 58.32 | 71.34 | [model-16M](https://drive.google.com/file/d/1V0vNZ3lAHpEEt0MlT80eL2f41K2tHm_D/view?usp=sharing)|
+| [Part-A2-Free](tools/cfgs/kitti_models/PartA2_free.yaml) | ~3.8 hours| 78.72 | 65.99 | 74.29 | [model-226M](https://drive.google.com/file/d/1lcUUxF8mJgZ_e-tZhP1XNQtTBuC-R0zr/view?usp=sharing) |
+| [Part-A2-Anchor](tools/cfgs/kitti_models/PartA2.yaml) | ~4.3 hours| 79.40 | 60.05 | 69.90 | [model-244M](https://drive.google.com/file/d/10GK1aCkLqxGNeX3lVu8cLZyE0G8002hY/view?usp=sharing) |
+| [PV-RCNN](tools/cfgs/kitti_models/pv_rcnn.yaml) | ~5 hours| 83.61 | 57.90 | 70.47 | [model-50M](https://drive.google.com/file/d/1lIOq4Hxr0W3qsX83ilQv0nk1Cls6KAr-/view?usp=sharing) |
+| [Voxel R-CNN (Car)](tools/cfgs/kitti_models/voxel_rcnn_car.yaml) | ~2.2 hours| 84.54 | - | - | [model-28M](https://drive.google.com/file/d/19_jiAeGLz7V0wNjSJw4cKmMjdm5EW5By/view?usp=sharing) |
+| [Focals Conv - F](tools/cfgs/kitti_models/voxel_rcnn_car_focal_multimodal.yaml) | ~4 hours| 85.66 | - | - | [model-30M](https://drive.google.com/file/d/1u2Vcg7gZPOI-EqrHy7_6fqaibvRt2IjQ/view?usp=sharing) |
+||
+| [CaDDN (Mono)](tools/cfgs/kitti_models/CaDDN.yaml) |~15 hours| 21.38 | 13.02 | 9.76 | [model-774M](https://drive.google.com/file/d/1OQTO2PtXT8GGr35W9m2GZGuqgb6fyU1V/view?usp=sharing) |
+
+### Waymo Open Dataset Baselines
+We provide the setting of [`DATA_CONFIG.SAMPLED_INTERVAL`](tools/cfgs/dataset_configs/waymo_dataset.yaml) on the Waymo Open Dataset (WOD) to subsample partial samples for training and evaluation,
+so you could also play with WOD
by setting a smaller `DATA_CONFIG.SAMPLED_INTERVAL` even if you only have limited GPU resources. + +By default, all models are trained with **a single frame** of **20% data (~32k frames)** of all the training samples on 8 GTX 1080Ti GPUs, and the results of each cell here are mAP/mAPH calculated by the official Waymo evaluation metrics on the **whole** validation set (version 1.2). + +| Performance@(train with 20\% Data) | Vec_L1 | Vec_L2 | Ped_L1 | Ped_L2 | Cyc_L1 | Cyc_L2 | +|---------------------------------------------|----------:|:-------:|:-------:|:-------:|:-------:|:-------:| +| [SECOND](tools/cfgs/waymo_models/second.yaml) | 70.96/70.34|62.58/62.02|65.23/54.24 |57.22/47.49| 57.13/55.62 | 54.97/53.53 | +| [PointPillar](tools/cfgs/waymo_models/pointpillar_1x.yaml) | 70.43/69.83 | 62.18/61.64 | 66.21/46.32|58.18/40.64|55.26/51.75|53.18/49.80 | +[CenterPoint-Pillar](tools/cfgs/waymo_models/centerpoint_pillar_1x.yaml)| 70.50/69.96|62.18/61.69|73.11/61.97|65.06/55.00|65.44/63.85|62.98/61.46| +[CenterPoint-Dynamic-Pillar](tools/cfgs/waymo_models/centerpoint_dyn_pillar_1x.yaml)| 70.46/69.93|62.06/61.58|73.92/63.35|65.91/56.33|66.24/64.69|63.73/62.24| +[CenterPoint](tools/cfgs/waymo_models/centerpoint_without_resnet.yaml)| 71.33/70.76|63.16/62.65| 72.09/65.49 |64.27/58.23| 68.68/67.39 |66.11/64.87| +| [CenterPoint (ResNet)](tools/cfgs/waymo_models/centerpoint.yaml)|72.76/72.23|64.91/64.42 |74.19/67.96 |66.03/60.34| 71.04/69.79 |68.49/67.28 | +| [Part-A2-Anchor](tools/cfgs/waymo_models/PartA2.yaml) | 74.66/74.12 |65.82/65.32 |71.71/62.24 |62.46/54.06 |66.53/65.18 |64.05/62.75 | +| [PV-RCNN (AnchorHead)](tools/cfgs/waymo_models/pv_rcnn.yaml) | 75.41/74.74 |67.44/66.80 |71.98/61.24 |63.70/53.95 |65.88/64.25 |63.39/61.82 | +| [PV-RCNN (CenterHead)](tools/cfgs/waymo_models/pv_rcnn_with_centerhead_rpn.yaml) | 75.95/75.43 |68.02/67.54 |75.94/69.40 |67.66/61.62 |70.18/68.98 |67.73/66.57| +| [Voxel R-CNN (CenterHead)-Dynamic-Voxel](tools/cfgs/waymo_models/voxel_rcnn_with_centerhead_dyn_voxel.yaml) | 76.13/75.66 |68.18/67.74 |78.20/71.98 |69.29/63.59 | 70.75/69.68 |68.25/67.21| +| [PV-RCNN++](tools/cfgs/waymo_models/pv_rcnn_plusplus.yaml) | 77.82/77.32| 69.07/68.62| 77.99/71.36| 69.92/63.74| 71.80/70.71| 69.31/68.26| +| [PV-RCNN++ (ResNet)](tools/cfgs/waymo_models/pv_rcnn_plusplus_resnet.yaml) |77.61/77.14| 69.18/68.75| 79.42/73.31| 70.88/65.21| 72.50/71.39| 69.84/68.77| + +Here we also provide the performance of several models trained on the full training set (refer to the paper of [PV-RCNN++](https://arxiv.org/abs/2102.00463)): + +| Performance@(train with 100\% Data) | Vec_L1 | Vec_L2 | Ped_L1 | Ped_L2 | Cyc_L1 | Cyc_L2 | +|-------------------------------------------------------------------------------------------|----------:|:-------:|:-------:|:-------:|:-------:|:-------:| +| [SECOND](tools/cfgs/waymo_models/second.yaml) | 72.27/71.69 | 63.85/63.33 | 68.70/58.18 | 60.72/51.31 | 60.62/59.28 | 58.34/57.05 | +| [CenterPoint-Pillar](tools/cfgs/waymo_models/centerpoint_pillar_1x.yaml) | 73.37/72.86 | 65.09/64.62 | 75.35/65.11 | 67.61/58.25 | 67.76/66.22 | 65.25/63.77 | +| [Part-A2-Anchor](tools/cfgs/waymo_models/PartA2.yaml) | 77.05/76.51 | 68.47/67.97 | 75.24/66.87 | 66.18/58.62 | 68.60/67.36 | 66.13/64.93 | +| [VoxelNeXt-2D](tools/cfgs/waymo_models/voxelnext2d_ioubranch.yaml) | 77.94/77.47 |69.68/69.25 |80.24/73.47 |72.23/65.88 |73.33/72.20 |70.66/69.56 | +| [VoxelNeXt](tools/cfgs/waymo_models/voxelnext_ioubranch_large.yaml) | 78.16/77.70 |69.86/69.42 |81.47/76.30 |73.48/68.63 |76.06/74.90 
|73.29/72.18 | +| [PV-RCNN (CenterHead)](tools/cfgs/waymo_models/pv_rcnn_with_centerhead_rpn.yaml) | 78.00/77.50 | 69.43/68.98 | 79.21/73.03 | 70.42/64.72 | 71.46/70.27 | 68.95/67.79 | +| [PV-RCNN++](tools/cfgs/waymo_models/pv_rcnn_plusplus.yaml) | 79.10/78.63 | 70.34/69.91 | 80.62/74.62 | 71.86/66.30 | 73.49/72.38 | 70.70/69.62 | +| [PV-RCNN++ (ResNet)](tools/cfgs/waymo_models/pv_rcnn_plusplus_resnet.yaml) | 79.25/78.78 | 70.61/70.18 | 81.83/76.28 | 73.17/68.00 | 73.72/72.66 | 71.21/70.19 | +| [DSVT-Pillar](tools/cfgs/waymo_models/dsvt_pillar.yaml) | 79.44/78.97 | 71.24/70.81 | 83.00/77.22 | 75.45/69.95 | 76.70/75.70 | 73.83/72.86 | +| [DSVT-Voxel](tools/cfgs/waymo_models/dsvt_voxel.yaml) | 79.77/79.31 | 71.67/71.25 | 83.75/78.92 | 76.21/71.57 | 77.57/76.58 | 74.70/73.73 | +| [PV-RCNN++ (ResNet, 2 frames)](tools/cfgs/waymo_models/pv_rcnn_plusplus_resnet_2frames.yaml) | 80.17/79.70 | 72.14/71.70 | 83.48/80.42 | 75.54/72.61 | 74.63/73.75 | 72.35/71.50 | +| [MPPNet (4 frames)](docs/guidelines_of_approaches/mppnet.md) | 81.54/81.06 | 74.07/73.61 | 84.56/81.94 | 77.20/74.67 | 77.15/76.50 | 75.01/74.38 | +| [MPPNet (16 frames)](docs/guidelines_of_approaches/mppnet.md) | 82.74/82.28 | 75.41/74.96 | 84.69/82.25 | 77.43/75.06 | 77.28/76.66 | 75.13/74.52 | + + + + + + + +We could not provide the above pretrained models due to [Waymo Dataset License Agreement](https://waymo.com/open/terms/), +but you could easily achieve similar performance by training with the default configs. + +### NuScenes 3D Object Detection Baselines +All models are trained with 8 GPUs and are available for download. For training BEVFusion, please refer to the [guideline](docs/guidelines_of_approaches/bevfusion.md). + +| | mATE | mASE | mAOE | mAVE | mAAE | mAP | NDS | download | +|----------------------------------------------------------------------------------------------------|-------:|:------:|:------:|:-----:|:-----:|:-----:|:------:|:--------------------------------------------------------------------------------------------------:| +| [PointPillar-MultiHead](tools/cfgs/nuscenes_models/cbgs_pp_multihead.yaml) | 33.87 | 26.00 | 32.07 | 28.74 | 20.15 | 44.63 | 58.23 | [model-23M](https://drive.google.com/file/d/1p-501mTWsq0G9RzroTWSXreIMyTUUpBM/view?usp=sharing) | +| [SECOND-MultiHead (CBGS)](tools/cfgs/nuscenes_models/cbgs_second_multihead.yaml) | 31.15 | 25.51 | 26.64 | 26.26 | 20.46 | 50.59 | 62.29 | [model-35M](https://drive.google.com/file/d/1bNzcOnE3u9iooBFMk2xK7HqhdeQ_nwTq/view?usp=sharing) | +| [CenterPoint-PointPillar](tools/cfgs/nuscenes_models/cbgs_dyn_pp_centerpoint.yaml) | 31.13 | 26.04 | 42.92 | 23.90 | 19.14 | 50.03 | 60.70 | [model-23M](https://drive.google.com/file/d/1UvGm6mROMyJzeSRu7OD1leU_YWoAZG7v/view?usp=sharing) | +| [CenterPoint (voxel_size=0.1)](tools/cfgs/nuscenes_models/cbgs_voxel01_res3d_centerpoint.yaml) | 30.11 | 25.55 | 38.28 | 21.94 | 18.87 | 56.03 | 64.54 | [model-34M](https://drive.google.com/file/d/1Cz-J1c3dw7JAWc25KRG1XQj8yCaOlexQ/view?usp=sharing) | +| [CenterPoint (voxel_size=0.075)](tools/cfgs/nuscenes_models/cbgs_voxel0075_res3d_centerpoint.yaml) | 28.80 | 25.43 | 37.27 | 21.55 | 18.24 | 59.22 | 66.48 | [model-34M](https://drive.google.com/file/d/1XOHAWm1MPkCKr1gqmc3TWi5AYZgPsgxU/view?usp=sharing) | +| [VoxelNeXt (voxel_size=0.075)](tools/cfgs/nuscenes_models/cbgs_voxel0075_voxelnext.yaml) | 30.11 | 25.23 | 40.57 | 21.69 | 18.56 | 60.53 | 66.65 | [model-31M](https://drive.google.com/file/d/1IV7e7G9X-61KXSjMGtQo579pzDNbhwvf/view?usp=share_link) | +| 
[TransFusion-L*](tools/cfgs/nuscenes_models/transfusion_lidar.yaml) | 27.96 | 25.37 | 29.35 | 27.31 | 18.55 | 64.58 | 69.43 | [model-32M](https://drive.google.com/file/d/1cuZ2qdDnxSwTCsiXWwbqCGF-uoazTXbz/view?usp=share_link) |
+| [BEVFusion](tools/cfgs/nuscenes_models/bevfusion.yaml) | 28.03 | 25.43 | 30.19 | 26.76 | 18.48 | 67.75 | 70.98 | [model-157M](https://drive.google.com/file/d/1X50b-8immqlqD8VPAUkSKI0Ls-4k37g9/view?usp=share_link) |
+
+*: Use the fade strategy, which disables data augmentations in the last several epochs during training.
+
+### ONCE 3D Object Detection Baselines
+All models are trained with 8 GPUs.
+
+| | Vehicle | Pedestrian | Cyclist | mAP |
+| ------------------------------------------------------ | :-----: | :--------: | :-----: | :----: |
+| [PointRCNN](tools/cfgs/once_models/pointrcnn.yaml) | 52.09 | 4.28 | 29.84 | 28.74 |
+| [PointPillar](tools/cfgs/once_models/pointpillar.yaml) | 68.57 | 17.63 | 46.81 | 44.34 |
+| [SECOND](tools/cfgs/once_models/second.yaml) | 71.19 | 26.44 | 58.04 | 51.89 |
+| [PV-RCNN](tools/cfgs/once_models/pv_rcnn.yaml) | 77.77 | 23.50 | 59.37 | 53.55 |
+| [CenterPoint](tools/cfgs/once_models/centerpoint.yaml) | 78.02 | 49.74 | 67.22 | 64.99 |
+
+### Argoverse2 3D Object Detection Baselines
+All models are trained with 4 GPUs.
+
+| | mAP | download |
+|------------------------------------------------------------------|:----:|:---------------------------------------------------------------------------------------------------:|
+| [VoxelNeXt](tools/cfgs/argo2_models/cbgs_voxel01_voxelnext.yaml) | 30.5 | [model-32M](https://drive.google.com/file/d/1YP2UOz-yO-cWfYQkIqILEu6bodvCBVrR/view?usp=share_link) |
+
+### Other datasets
+Contributions that support other datasets are welcome; please submit a pull request.
+
+## Installation
+
+Please refer to [INSTALL.md](docs/INSTALL.md) for the installation of `OpenPCDet`.
+
+
+## Quick Demo
+Please refer to [DEMO.md](docs/DEMO.md) for a quick demo to test with a pretrained model and
+visualize the predicted results on your custom data or the original KITTI data.
+
+## Getting Started
+
+Please refer to [GETTING_STARTED.md](docs/GETTING_STARTED.md) to learn more about the usage of this project.
+
+
+## License
+
+`OpenPCDet` is released under the [Apache 2.0 license](LICENSE).
+
+## Acknowledgement
+`OpenPCDet` is an open source project for LiDAR-based 3D scene perception that supports multiple
+LiDAR-based perception models as shown above. Some parts of `PCDet` are learned from the officially released codes of the above supported methods.
+We would like to thank the authors for their proposed methods and official implementations.
+
+We hope that this repo could serve as a strong and flexible codebase to benefit the research community by speeding up the process of reimplementing previous works and/or developing new methods.
+
+
+## Citation
+If you find this project useful in your research, please consider citing:
+
+
+```
+@misc{openpcdet2020,
+    title={OpenPCDet: An Open-source Toolbox for 3D Object Detection from Point Clouds},
+    author={OpenPCDet Development Team},
+    howpublished = {\url{https://github.com/open-mmlab/OpenPCDet}},
+    year={2020}
+}
+```
+
+## Contribution
+You are welcome to join the OpenPCDet development team by contributing to this repo, and feel free to contact us about any potential contributions.
+
+
diff --git a/toolbox/openpcdet/build_openpcdet.sh b/toolbox/openpcdet/build_openpcdet.sh
new file mode 100644
index 000000000..473ebbb17
--- /dev/null
+++ b/toolbox/openpcdet/build_openpcdet.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+COREX_VERSION=${COREX_VERSION:-latest}
+MAX_JOBS=${MAX_JOBS:-$(nproc --all)}
+PYTHON_PATH=$(which python3)
+${PYTHON_PATH} -m pip list | grep "^torch .*+corex" || {
+  echo "ERROR: building openpcdet requires that the corex build of torch is installed."
+  exit 1
+}
+
+export MAX_JOBS=${MAX_JOBS}
+
+${PYTHON_PATH} setup.py bdist_wheel -d build_pip || exit
+# Return 0 status if all finished
+exit 0
+
diff --git a/toolbox/openpcdet/clean_openpcdet.sh b/toolbox/openpcdet/clean_openpcdet.sh
new file mode 100644
index 000000000..feda20c48
--- /dev/null
+++ b/toolbox/openpcdet/clean_openpcdet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+PYTHON_PATH=$(which python3)
+
+rm -rf build
+${PYTHON_PATH} setup.py clean || true
+rm -rf build_pip
+
+# Return 0 status if all finished
+exit 0
\ No newline at end of file
diff --git a/toolbox/openpcdet/docker/Dockerfile b/toolbox/openpcdet/docker/Dockerfile
new file mode 100644
index 000000000..3f0114f70
--- /dev/null
+++ b/toolbox/openpcdet/docker/Dockerfile
@@ -0,0 +1,55 @@
+FROM nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04
+
+RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
+
+# Install basics
+RUN apt-get update -y \
+    && apt-get install -y build-essential \
+    && apt-get install -y apt-utils git curl ca-certificates bzip2 tree htop wget \
+    && apt-get install -y libglib2.0-0 libsm6 libxext6 libxrender-dev bmon iotop g++ python3.7 python3.7-dev python3.7-distutils
+
+# Install cmake v3.13.2
+RUN apt-get purge -y cmake && \
+    mkdir /root/temp && \
+    cd /root/temp && \
+    wget https://github.com/Kitware/CMake/releases/download/v3.13.2/cmake-3.13.2.tar.gz && \
+    tar -xzvf cmake-3.13.2.tar.gz && \
+    cd cmake-3.13.2 && \
+    bash ./bootstrap && \
+    make && \
+    make install && \
+    cmake --version && \
+    rm -rf /root/temp
+
+# Install python
+RUN ln -sv /usr/bin/python3.7 /usr/bin/python
+RUN wget https://bootstrap.pypa.io/get-pip.py && \
+    python get-pip.py && \
+    rm get-pip.py
+
+# Install python packages
+RUN PIP_INSTALL="python -m pip --no-cache-dir install" && \
+    $PIP_INSTALL numpy==1.19.3 llvmlite numba
+
+# Install torch and torchvision
+# See https://pytorch.org/ for other options if you use a different version of CUDA
+RUN pip install --user torch==1.6 torchvision==0.7.0 -f https://download.pytorch.org/whl/cu102/torch_stable.html
+
+# Install python packages
+RUN PIP_INSTALL="python -m pip --no-cache-dir install" && \
+    $PIP_INSTALL tensorboardX easydict pyyaml scikit-image tqdm SharedArray six
+
+WORKDIR /root
+
+# Install Boost geometry
+RUN wget https://jaist.dl.sourceforge.net/project/boost/boost/1.68.0/boost_1_68_0.tar.gz && \
+    tar xzvf boost_1_68_0.tar.gz && \
+    cp -r ./boost_1_68_0/boost /usr/include && \
+    rm -rf ./boost_1_68_0 && \
+    rm -rf ./boost_1_68_0.tar.gz
+
+# A weird problem that hasn't been solved yet: SharedArray needs a clean reinstall
+RUN pip uninstall -y SharedArray && \
+    pip install SharedArray
+
+RUN pip install spconv-cu102
\ No newline at end of file
diff --git a/toolbox/openpcdet/docker/README.md b/toolbox/openpcdet/docker/README.md
new file mode 100644
index 000000000..5e258fbed
--- /dev/null
+++ b/toolbox/openpcdet/docker/README.md
@@ -0,0 +1,25 @@
+# Guidance to use OpenPCDet with docker
+
+You can either build the docker image through the Dockerfile or pull the docker image from Docker Hub.
Please make sure nvidia-docker is corretly installed. + +## Build Through Dockerfile +Build docker image that support OpenPCDet through: +```shell script +docker build ./ -t openpcdet-docker +``` +Note that if you would like to use dynamic voxelization, you need further install [`torch_scatter`](https://github.com/rusty1s/pytorch_scatter) package. + +From this Dockerfile, the installed version of spconv is 2.x, if you would like to use spconv 1.2.1, please follow these steps: +```shell script +git clone -b v1.2.1 https://github.com/djiajunustc/spconv spconv --recursive +cd spconv +python setup.py bdist_wheel +cd ./dist +pip install *.whl +``` + +## Pull From Dockerhub +Run the following script to pull the docker image: +```shell script +docker pull djiajun1206/pcdet:pytorch1.6 +``` diff --git a/toolbox/openpcdet/docker/cu116.Dockerfile b/toolbox/openpcdet/docker/cu116.Dockerfile new file mode 100644 index 000000000..f68e4bdf7 --- /dev/null +++ b/toolbox/openpcdet/docker/cu116.Dockerfile @@ -0,0 +1,83 @@ +FROM nvidia/cuda:11.6.2-devel-ubuntu20.04 + +# Set environment variables +ENV NVENCODE_CFLAGS "-I/usr/local/cuda/include" +ENV CV_VERSION=4.2.0 +ENV DEBIAN_FRONTEND=noninteractive + +# Get all dependencies +RUN apt-get update && apt-get install -y \ + git zip unzip libssl-dev libcairo2-dev lsb-release libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev software-properties-common \ + build-essential cmake pkg-config libapr1-dev autoconf automake libtool curl libc6 libboost-all-dev debconf libomp5 libstdc++6 \ + libqt5core5a libqt5xml5 libqt5gui5 libqt5widgets5 libqt5concurrent5 libqt5opengl5 libcap2 libusb-1.0-0 libatk-adaptor neovim \ + python3-pip python3-tornado python3-dev python3-numpy python3-virtualenv libpcl-dev libgoogle-glog-dev libgflags-dev libatlas-base-dev \ + libsuitesparse-dev python3-pcl pcl-tools libgtk2.0-dev libavcodec-dev libavformat-dev libswscale-dev libtbb2 libtbb-dev libjpeg-dev \ + libpng-dev libtiff-dev libdc1394-22-dev xfce4-terminal &&\ + rm -rf /var/lib/apt/lists/* + +# OpenCV with CUDA support +WORKDIR /opencv +RUN git clone https://github.com/opencv/opencv.git -b $CV_VERSION &&\ + git clone https://github.com/opencv/opencv_contrib.git -b $CV_VERSION + +# While using OpenCV 4.2.0 we have to apply some fixes to ensure that CUDA is fully supported, thanks @https://github.com/gismo07 for this fix +RUN mkdir opencvfix && cd opencvfix &&\ + git clone https://github.com/opencv/opencv.git -b 4.5.2 &&\ + cd opencv/cmake &&\ + cp -r FindCUDA /opencv/opencv/cmake/ &&\ + cp FindCUDA.cmake /opencv/opencv/cmake/ &&\ + cp FindCUDNN.cmake /opencv/opencv/cmake/ &&\ + cp OpenCVDetectCUDA.cmake /opencv/opencv/cmake/ + +WORKDIR /opencv/opencv/build + +RUN cmake -D CMAKE_BUILD_TYPE=RELEASE \ +-D CMAKE_INSTALL_PREFIX=/usr/local \ +-D OPENCV_GENERATE_PKGCONFIG=ON \ +-D BUILD_EXAMPLES=OFF \ +-D INSTALL_PYTHON_EXAMPLES=OFF \ +-D INSTALL_C_EXAMPLES=OFF \ +-D PYTHON_EXECUTABLE=$(which python2) \ +-D PYTHON3_EXECUTABLE=$(which python3) \ +-D PYTHON3_INCLUDE_DIR=$(python3 -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") \ +-D PYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \ +-D BUILD_opencv_python2=ON \ +-D BUILD_opencv_python3=ON \ +-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules/ \ +-D WITH_GSTREAMER=ON \ +-D WITH_CUDA=ON \ +-D ENABLE_PRECOMPILED_HEADERS=OFF \ +.. 
+make -j$(nproc) &&\
+make install &&\
+ldconfig &&\
+rm -rf /opencv
+
+WORKDIR /
+ENV OpenCV_DIR=/usr/share/OpenCV
+
+
+# PyTorch for CUDA 11.6
+RUN pip3 install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116
+ENV TORCH_CUDA_ARCH_LIST="3.5;5.0;6.0;6.1;7.0;7.5;8.0;8.6+PTX"
+
+# OpenPCDet
+RUN pip3 install numpy==1.23.0 llvmlite numba tensorboardX easydict pyyaml scikit-image tqdm SharedArray open3d mayavi av2 kornia pyquaternion
+RUN pip3 install spconv-cu116
+
+RUN git clone https://github.com/open-mmlab/OpenPCDet.git
+
+WORKDIR OpenPCDet
+
+RUN python3 setup.py develop
+
+WORKDIR /
+
+ENV NVIDIA_VISIBLE_DEVICES="all" \
+    OpenCV_DIR=/usr/share/OpenCV \
+    NVIDIA_DRIVER_CAPABILITIES="video,compute,utility,graphics" \
+    LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/lib:/usr/lib:/usr/local/lib \
+    QT_GRAPHICSSYSTEM="native"
+
+# Build instructions: docker build -f cu116.Dockerfile -t openpcdet:cuda11 .
+# Start instructions: xhost local:root && docker run -it --rm -e SDL_VIDEODRIVER=x11 -e DISPLAY=$DISPLAY --env='DISPLAY' --gpus all --ipc host --privileged --network host -p 8080:8081 -v /tmp/.X11-unix:/tmp/.X11-unix:rw -v file_locations:/storage -v /weights:/weights openpcdet:cuda11 xfce4-terminal --title=openPCDet
diff --git a/toolbox/openpcdet/docs/CUSTOM_DATASET_TUTORIAL.md b/toolbox/openpcdet/docs/CUSTOM_DATASET_TUTORIAL.md
new file mode 100644
index 000000000..edaee47ce
--- /dev/null
+++ b/toolbox/openpcdet/docs/CUSTOM_DATASET_TUTORIAL.md
@@ -0,0 +1,108 @@
+# Custom Dataset Tutorial
+For the custom dataset template, we only consider the basic scenario: raw point clouds and
+their corresponding annotations. Point clouds are supposed to be stored in `.npy` format.
+
+## Label format
+We only consider the most basic information -- category and bounding box -- in the label template.
+Annotations are stored in `.txt` files. Each line represents a box in a given scene as below:
+```
+# format: [x y z dx dy dz heading_angle category_name]
+1.50 1.46 0.10 5.12 1.85 4.13 1.56 Vehicle
+5.54 0.57 0.41 1.08 0.74 1.95 1.57 Pedestrian
+```
+The box should be in the unified 3D box definition (see the [README](../README.md)).
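+
+For reference, here is a minimal sketch of parsing such a label file into arrays (the helper
+name `load_custom_labels` is ours for illustration, not part of the codebase):
+```python
+import numpy as np
+
+def load_custom_labels(label_file):
+    """Parse one label .txt into an (N, 7) box array and a list of category names."""
+    boxes, names = [], []
+    with open(label_file, 'r') as f:
+        for line in f:
+            parts = line.split()
+            if len(parts) != 8:
+                continue  # skip empty or malformed lines
+            boxes.append([float(v) for v in parts[:7]])  # x y z dx dy dz heading_angle
+            names.append(parts[7])
+    return np.array(boxes, dtype=np.float32), names
+```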
+
+## Files structure
+Files should be placed as the following folder structure:
+```
+OpenPCDet
+├── data
+│ ├── custom
+│ │ │── ImageSets
+│ │ │ │── train.txt
+│ │ │ │── val.txt
+│ │ │── points
+│ │ │ │── 000000.npy
+│ │ │ │── 999999.npy
+│ │ │── labels
+│ │ │ │── 000000.txt
+│ │ │ │── 999999.txt
+├── pcdet
+├── tools
+```
+Dataset splits need to be pre-defined and placed in `ImageSets`.
+
+## Hyper-parameters Configurations
+
+### Point cloud features
+Modify the following configurations in `custom_dataset.yaml` to
+suit your own point clouds:
+```yaml
+POINT_FEATURE_ENCODING: {
+    encoding_type: absolute_coordinates_encoding,
+    used_feature_list: ['x', 'y', 'z', 'intensity'],
+    src_feature_list: ['x', 'y', 'z', 'intensity'],
+}
+...
+# In gt_sampling data augmentation
+NUM_POINT_FEATURES: 4
+```
+
+### Point cloud range and voxel sizes
+For voxel-based detectors such as SECOND, PV-RCNN and CenterPoint, the point cloud range and voxel size should follow these rules:
+1. The point cloud range along the z-axis divided by the voxel size should be 40.
+2. The point cloud range along the x- and y-axes divided by the voxel size should be a multiple of 16.
+
+Notice that the second rule also suits pillar-based detectors such as PointPillar and CenterPoint-Pillar.
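+
+A quick way to sanity-check a config against these two rules (a sketch; the range and voxel
+size below are example values, not recommendations):
+```python
+POINT_CLOUD_RANGE = [0, -40, -3, 70.4, 40, 1]  # x_min, y_min, z_min, x_max, y_max, z_max
+VOXEL_SIZE = [0.05, 0.05, 0.1]                 # dx, dy, dz
+
+z_ratio = (POINT_CLOUD_RANGE[5] - POINT_CLOUD_RANGE[2]) / VOXEL_SIZE[2]
+x_ratio = (POINT_CLOUD_RANGE[3] - POINT_CLOUD_RANGE[0]) / VOXEL_SIZE[0]
+y_ratio = (POINT_CLOUD_RANGE[4] - POINT_CLOUD_RANGE[1]) / VOXEL_SIZE[1]
+
+assert round(z_ratio) == 40, f'z-range / voxel size should be 40, got {z_ratio}'
+assert round(x_ratio) % 16 == 0 and round(y_ratio) % 16 == 0, \
+    'x/y range / voxel size should be a multiple of 16'
+```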
+
+### Category names and anchor sizes
+Category names and anchor sizes need to be adapted to custom datasets:
+```yaml
+CLASS_NAMES: ['Vehicle', 'Pedestrian', 'Cyclist']
+...
+MAP_CLASS_TO_KITTI: {
+    'Vehicle': 'Car',
+    'Pedestrian': 'Pedestrian',
+    'Cyclist': 'Cyclist',
+}
+...
+'anchor_sizes': [[3.9, 1.6, 1.56]],
+...
+# In gt sampling data augmentation
+PREPARE: {
+   filter_by_min_points: ['Vehicle:5', 'Pedestrian:5', 'Cyclist:5'],
+   filter_by_difficulty: [-1],
+}
+SAMPLE_GROUPS: ['Vehicle:20', 'Pedestrian:15', 'Cyclist:15']
+...
+```
+In addition, please also modify the default category names for creating infos in `custom_dataset.py`:
+```
+create_custom_infos(
+    dataset_cfg=dataset_cfg,
+    class_names=['Vehicle', 'Pedestrian', 'Cyclist'],
+    data_path=ROOT_DIR / 'data' / 'custom',
+    save_path=ROOT_DIR / 'data' / 'custom',
+)
+```
+
+
+## Create data info
+Generate the data infos by running the following command:
+```shell
+python -m pcdet.datasets.custom.custom_dataset create_custom_infos tools/cfgs/dataset_configs/custom_dataset.yaml
+```
+
+
+## Evaluation
+Here, we only provide an implementation for KITTI-style evaluation.
+The category mapping between the custom dataset and KITTI needs to be defined
+in `custom_dataset.yaml`:
+```yaml
+MAP_CLASS_TO_KITTI: {
+    'Vehicle': 'Car',
+    'Pedestrian': 'Pedestrian',
+    'Cyclist': 'Cyclist',
+}
+```
diff --git a/toolbox/openpcdet/docs/DEMO.md b/toolbox/openpcdet/docs/DEMO.md
new file mode 100644
index 000000000..c7554302d
--- /dev/null
+++ b/toolbox/openpcdet/docs/DEMO.md
@@ -0,0 +1,51 @@
+# Quick Demo
+
+Here we provide a quick demo to test a pretrained model on custom point cloud data and visualize the predicted results.
+
+We assume you have already followed [INSTALL.md](INSTALL.md) to install the `OpenPCDet` repo successfully.
+
+1. Download the provided pretrained models as shown in the [README.md](../README.md).
+
+2. Make sure you have already installed the [`Open3D`](https://github.com/isl-org/Open3D) (faster) or `mayavi` visualization tools.
+If not, you could install one of them as follows:
+   ```
+   pip install open3d
+   # or
+   pip install mayavi
+   ```
+
+3. Prepare your custom point cloud data (skip this step if you use the original KITTI data).
+   * You need to transform the coordinates of your custom point cloud into
+the unified normative coordinate frame of `OpenPCDet`: the x-axis points towards the front direction,
+the y-axis points towards the left direction, and the z-axis points towards the top direction.
+   * (Optional) The z-axis origin of your point cloud coordinate should be about 1.6 m above the ground surface,
+   since the provided models are currently trained on the KITTI dataset.
+   * Set the intensity information, and save your transformed custom data to a `numpy` file:
+   ```python
+   # Transform your point cloud data
+   ...
+
+   # Save it to the file.
+   # The shape of points should be (num_points, 4), that is [x, y, z, intensity] (only for the KITTI dataset).
+   # If you don't have intensity information, just set it to zeros.
+   # If you have intensity information, you should normalize it to [0, 1].
+   points[:, 3] = 0
+   np.save('my_data.npy', points)
+   ```
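+
+   For instance, if your sensor delivers points in a camera-style frame (x right, y down, z forward),
+   the remapping to the unified frame is a simple axis permutation. A minimal sketch (the source
+   convention and file name here are assumptions; adapt them to your sensor):
+   ```python
+   import numpy as np
+
+   pts_cam = np.load('raw_points.npy')  # (N, 4): x right, y down, z forward, intensity
+   points = np.zeros_like(pts_cam)
+   points[:, 0] = pts_cam[:, 2]         # unified x = forward
+   points[:, 1] = -pts_cam[:, 0]        # unified y = left
+   points[:, 2] = -pts_cam[:, 1]        # unified z = up
+   points[:, 3] = pts_cam[:, 3]         # keep intensity (normalized to [0, 1])
+   ```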
+
+4. Run the demo with a pretrained model (e.g. PV-RCNN) and your custom point cloud data as follows:
+```shell
+python demo.py --cfg_file cfgs/kitti_models/pv_rcnn.yaml \
+    --ckpt pv_rcnn_8369.pth \
+    --data_path ${POINT_CLOUD_DATA}
+```
+Here `${POINT_CLOUD_DATA}` could be in any of the following formats:
+* Your transformed custom data as a single numpy file like `my_data.npy`.
+* Your transformed custom data as a directory, to test with multiple point clouds.
+* The original KITTI `.bin` data within `data/kitti`, like `data/kitti/training/velodyne/000008.bin`.
+
+Then you could see the predicted results with the visualized point cloud as follows:
+
+<p align="center"><img src="demo.png"></p>
diff --git a/toolbox/openpcdet/docs/GETTING_STARTED.md b/toolbox/openpcdet/docs/GETTING_STARTED.md
new file mode 100644
index 000000000..9bc558da9
--- /dev/null
+++ b/toolbox/openpcdet/docs/GETTING_STARTED.md
@@ -0,0 +1,273 @@
+# Getting Started
+The dataset configs are located within [tools/cfgs/dataset_configs](../tools/cfgs/dataset_configs),
+and the model configs are located within [tools/cfgs](../tools/cfgs) for different datasets.
+
+
+## Dataset Preparation
+
+Currently we provide dataloaders for KITTI, NuScenes, Waymo, Lyft and Pandaset. If you want to use a custom dataset, please refer to our [custom dataset template](CUSTOM_DATASET_TUTORIAL.md).
+
+### KITTI Dataset
+* Please download the official [KITTI 3D object detection](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d) dataset and organize the downloaded files as follows (the road planes could be downloaded from [[road plane]](https://drive.google.com/file/d/1d5mq0RXRnvHPVeKx6Q612z0YRO1t2wAp/view?usp=sharing), which are optional for data augmentation in training):
+* If you would like to train [CaDDN](../tools/cfgs/kitti_models/CaDDN.yaml), download the precomputed [depth maps](https://drive.google.com/file/d/1qFZux7KC_gJ0UHEg-qGJKqteE9Ivojin/view?usp=sharing) for the KITTI training set.
+* NOTE: if you already have the data infos from `pcdet v0.1`, you can choose to use the old infos and set the DATABASE_WITH_FAKELIDAR option in `tools/cfgs/dataset_configs/kitti_dataset.yaml` to True. Alternatively, you can create the infos and gt database again and leave the config unchanged.
+
+```
+OpenPCDet
+├── data
+│ ├── kitti
+│ │ │── ImageSets
+│ │ │── training
+│ │ │ ├──calib & velodyne & label_2 & image_2 & (optional: planes) & (optional: depth_2)
+│ │ │── testing
+│ │ │ ├──calib & velodyne & image_2
+├── pcdet
+├── tools
+```
+
+* Generate the data infos by running the following command:
+```python
+python -m pcdet.datasets.kitti.kitti_dataset create_kitti_infos tools/cfgs/dataset_configs/kitti_dataset.yaml
+```
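+
+To verify the result, a small sketch that loads one of the generated info files (the file name
+is what a default run produces; the exact keys depend on your setup, so treat the comment below
+as an assumption):
+```python
+import pickle
+
+with open('data/kitti/kitti_infos_train.pkl', 'rb') as f:
+    infos = pickle.load(f)          # a list with one dict per frame
+
+print(f'{len(infos)} frames')
+print(sorted(infos[0].keys()))      # e.g. annos / calib / image / point_cloud
+```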
+
+### NuScenes Dataset
+* Please download the official [NuScenes 3D object detection dataset](https://www.nuscenes.org/download) and
+organize the downloaded files as follows:
+```
+OpenPCDet
+├── data
+│ ├── nuscenes
+│ │ │── v1.0-trainval (or v1.0-mini if you use mini)
+│ │ │ │── samples
+│ │ │ │── sweeps
+│ │ │ │── maps
+│ │ │ │── v1.0-trainval
+├── pcdet
+├── tools
+```
+
+* Install the `nuscenes-devkit` with version `1.0.5` by running the following command:
+```shell script
+pip install nuscenes-devkit==1.0.5
+```
+
+* Generate the data infos by running the following command (it may take several hours):
+```python
+# for lidar-only setting
+python -m pcdet.datasets.nuscenes.nuscenes_dataset --func create_nuscenes_infos \
+    --cfg_file tools/cfgs/dataset_configs/nuscenes_dataset.yaml \
+    --version v1.0-trainval
+
+# for multi-modal setting
+python -m pcdet.datasets.nuscenes.nuscenes_dataset --func create_nuscenes_infos \
+    --cfg_file tools/cfgs/dataset_configs/nuscenes_dataset.yaml \
+    --version v1.0-trainval \
+    --with_cam
+```
+
+### Waymo Open Dataset
+* Please download the official [Waymo Open Dataset](https://waymo.com/open/download/),
+including the training data `training_0000.tar~training_0031.tar` and the validation
+data `validation_0000.tar~validation_0007.tar`.
+* Unzip all the above `xxxx.tar` files to the directory of `data/waymo/raw_data` as follows (you should get 798 *train* tfrecords and 202 *val* tfrecords):
+```
+OpenPCDet
+├── data
+│ ├── waymo
+│ │ │── ImageSets
+│ │ │── raw_data
+│ │ │ │── segment-xxxxxxxx.tfrecord
+│ │ │ │── ...
+│ │ │── waymo_processed_data_v0_5_0
+│ │ │ │── segment-xxxxxxxx/
+│ │ │ │── ...
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1/ (old, for single-frame)
+│ │ │── waymo_processed_data_v0_5_0_waymo_dbinfos_train_sampled_1.pkl (old, for single-frame)
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1_global.npy (optional, old, for single-frame)
+│ │ │── waymo_processed_data_v0_5_0_infos_train.pkl (optional)
+│ │ │── waymo_processed_data_v0_5_0_infos_val.pkl (optional)
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1_multiframe_-4_to_0 (new, for single/multi-frame)
+│ │ │── waymo_processed_data_v0_5_0_waymo_dbinfos_train_sampled_1_multiframe_-4_to_0.pkl (new, for single/multi-frame)
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1_multiframe_-4_to_0_global.np (new, for single/multi-frame)
+├── pcdet
+├── tools
+```
+* Install the official `waymo-open-dataset` by running the following command:
+```shell script
+pip3 install --upgrade pip
+# for tensorflow 2.5.0
+pip3 install waymo-open-dataset-tf-2-5-0 --user
+```
+
+* Extract point cloud data from the tfrecords and generate data infos by running the following command (it takes several hours,
+and you could refer to `data/waymo/waymo_processed_data_v0_5_0` to see how many records have been processed):
+```python
+# only for single-frame setting
+python -m pcdet.datasets.waymo.waymo_dataset --func create_waymo_infos \
+    --cfg_file tools/cfgs/dataset_configs/waymo_dataset.yaml

+# for single-frame or multi-frame setting
+python -m pcdet.datasets.waymo.waymo_dataset --func create_waymo_infos \
+    --cfg_file tools/cfgs/dataset_configs/waymo_dataset_multiframe.yaml
+# Ignore the 'CUDA_ERROR_NO_DEVICE' error as this process does not require a GPU.
+```
+
+Note that you do not need to install `waymo-open-dataset` if you have already processed the data before and do not need to evaluate with the official Waymo metrics.
+
+### Argoverse2 Dataset
+* Download the **Argoverse 2 Sensor Dataset** from the [official website](https://www.argoverse.org/av2.html#download-link), and then extract it.
+* Install the official API of Argoverse 2:
+```shell
+pip install av2==0.2.0
+```
+* Generate info files for `train` and `val`:
+```python
+python -m pcdet.datasets.argo2.argo2_dataset --root_path data/argo2/sensor --output_dir data/argo2
+```
+- Note that you may run into this [issue](https://github.com/argoverse/av2-api/issues/102) in the argo2 API.
+- If the CPU memory of your machine is limited, you can set `--workers=0` in the training script.
+- The organized files are as follows:
+```
+OpenPCDet
+├── data
+│ ├── argo2
+│ │ │── ImageSets
+│ │ │ ├──train.txt & val.txt
+│ │ │── training
+│ │ │ ├──velodyne
+│ │ │── sensor
+│ │ │ ├──val
+│ │ │── argo2_infos_train.pkl
+│ │ │── argo2_infos_val.pkl
+│ │ │── val_anno.feather
+├── pcdet
+├── tools
+```
+
+
+### ONCE Dataset
+* Please download train/val/test of the official [ONCE Dataset](https://once-for-auto-driving.github.io/download.html#downloads) and
+organize the downloaded files as follows:
+* Note that the whole dataset is large (2TB) and most scenes are unlabeled, so if you only need ONCE for supervised 3D object detection and model development, you can just download the training/validation/testing splits. If you use ONCE for semi-supervised/self-supervised 3D object detection, you can choose to download the respective unlabeled splits (unlabeled small split: 100k unlabeled scenes; unlabeled medium split: 500k unlabeled scenes; unlabeled large split: 1M unlabeled scenes).
+```
+ONCE_Benchmark
+├── data
+│ ├── once
+│ │ │── ImageSets
+│ │ │ ├──train.txt
+│ │ │ ├──val.txt
+│ │ │ ├──test.txt
+│ │ │ ├──raw_small.txt (100k unlabeled)
+│ │ │ ├──raw_medium.txt (500k unlabeled)
+│ │ │ ├──raw_large.txt (1M unlabeled)
+│ │ │── data
+│ │ │ ├──000000
+│ │ │ │ │──000000.json (infos)
+│ │ │ │ │──lidar_roof (point clouds)
+│ │ │ │ │ │──frame_timestamp_1.bin
+│ │ │ │ │ ...
+│ │ │ │ │──cam0[1-9] (images)
+│ │ │ │ │ │──frame_timestamp_1.jpg
+│ │ │ │ │ ...
+│ │ │ │ ...
+├── pcdet
├── tools
+```
+
+* Generate the data infos by running the following command:
+```python
+python -m pcdet.datasets.once.once_dataset --func create_once_infos --cfg_file tools/cfgs/dataset_configs/once_dataset.yaml
+```
+
+
+### Lyft Dataset
+* Please download the official [Lyft Level5 perception dataset](https://level-5.global/data/perception) and
+organize the downloaded files as follows:
+```
+OpenPCDet
+├── data
+│ ├── lyft
+│ │ │── ImageSets
+│ │ │── trainval
+│ │ │ │── data & maps(train_maps) & images(train_images) & lidar(train_lidar) & train_lidar
+│ │ │── test
+│ │ │ │── data & maps(test_maps) & test_images & test_lidar
+├── pcdet
+├── tools
+```
+
+* Install the `lyft-dataset-sdk` with version `0.0.8` by running the following command:
+```shell script
+pip install -U lyft_dataset_sdk==0.0.8
+```
+
+* Generate the training & validation data infos by running the following command (it may take several hours):
+```python
+python -m pcdet.datasets.lyft.lyft_dataset --func create_lyft_infos \
+    --cfg_file tools/cfgs/dataset_configs/lyft_dataset.yaml
+```
+* Generate the test data infos by running the following command:
+```python
+python -m pcdet.datasets.lyft.lyft_dataset --func create_lyft_infos \
+    --cfg_file tools/cfgs/dataset_configs/lyft_dataset.yaml --version test
+```
+
+* Please check the generated infos carefully, since we do not provide a benchmark for this dataset.
+
+
+## Pretrained Models
+If you would like to train [CaDDN](../tools/cfgs/kitti_models/CaDDN.yaml), download the pretrained [DeepLabV3 model](https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth) and place it within the `checkpoints` directory. Please make sure [kornia](https://github.com/kornia/kornia) is installed, since it is needed for `CaDDN`.
+```
+OpenPCDet
+├── checkpoints
+│ ├── deeplabv3_resnet101_coco-586e9e4e.pth
+├── data
+├── pcdet
+├── tools
+```
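+
+A quick sanity check that the downloaded checkpoint loads (a sketch; that this torchvision
+checkpoint is a plain state dict is an assumption worth verifying on your file):
+```python
+import torch
+
+ckpt = torch.load('checkpoints/deeplabv3_resnet101_coco-586e9e4e.pth', map_location='cpu')
+print(type(ckpt))             # typically an OrderedDict of parameter tensors
+print(list(ckpt.keys())[:5])  # first few parameter names
+```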
+
+## Training & Testing
+
+
+### Test and evaluate the pretrained models
+* Test with a pretrained model:
+```shell script
+python test.py --cfg_file ${CONFIG_FILE} --batch_size ${BATCH_SIZE} --ckpt ${CKPT}
+```
+
+* To test all the saved checkpoints of a specific training setting and draw the performance curve on Tensorboard, add the `--eval_all` argument:
+```shell script
+python test.py --cfg_file ${CONFIG_FILE} --batch_size ${BATCH_SIZE} --eval_all
+```
+
+* To test with multiple GPUs:
+```shell script
+sh scripts/dist_test.sh ${NUM_GPUS} \
+    --cfg_file ${CONFIG_FILE} --batch_size ${BATCH_SIZE}
+
+# or
+
+sh scripts/slurm_test_mgpu.sh ${PARTITION} ${NUM_GPUS} \
+    --cfg_file ${CONFIG_FILE} --batch_size ${BATCH_SIZE}
+```
+
+
+### Train a model
+You could optionally add extra command line parameters `--batch_size ${BATCH_SIZE}` and `--epochs ${EPOCHS}` to specify your preferred parameters.
+
+
+* Train with multiple GPUs or multiple machines:
+```shell script
+sh scripts/dist_train.sh ${NUM_GPUS} --cfg_file ${CONFIG_FILE}
+
+# or
+
+sh scripts/slurm_train.sh ${PARTITION} ${JOB_NAME} ${NUM_GPUS} --cfg_file ${CONFIG_FILE}
+```
+
+* Train with a single GPU:
+```shell script
+python train.py --cfg_file ${CONFIG_FILE}
+```
diff --git a/toolbox/openpcdet/docs/INSTALL.md b/toolbox/openpcdet/docs/INSTALL.md
new file mode 100644
index 000000000..21055903c
--- /dev/null
+++ b/toolbox/openpcdet/docs/INSTALL.md
@@ -0,0 +1,38 @@
+# Installation
+
+### Requirements
+All the code is tested in the following environment:
+* Linux (tested on Ubuntu 14.04/16.04/18.04/20.04/21.04)
+* Python 3.6+
+* PyTorch 1.1 or higher (tested on PyTorch 1.1, 1.3, 1.5~1.10)
+* CUDA 9.0 or higher (PyTorch 1.3+ needs CUDA 9.2+)
+* [`spconv v1.0 (commit 8da6f96)`](https://github.com/traveller59/spconv/tree/8da6f967fb9a054d8870c3515b1b44eca2103634) or [`spconv v1.2`](https://github.com/traveller59/spconv) or [`spconv v2.x`](https://github.com/traveller59/spconv)
+
+
+### Install `pcdet v0.5`
+NOTE: Please re-install `pcdet v0.5` by running `python setup.py develop` even if you have already installed a previous version.
+
+a. Clone this repository.
+```shell
+git clone https://github.com/open-mmlab/OpenPCDet.git
+```
+
+b. Install the dependent libraries as follows:
+
+[comment]: <> (* Install the dependent python libraries: )
+
+[comment]: <> (```)
+
+[comment]: <> (pip install -r requirements.txt )
+
+[comment]: <> (```)
+
+* Install the SparseConv library; we use the implementation from [`spconv`](https://github.com/traveller59/spconv).
+    * If you use PyTorch 1.1, make sure you install `spconv v1.0` ([commit 8da6f96](https://github.com/traveller59/spconv/tree/8da6f967fb9a054d8870c3515b1b44eca2103634)) instead of the latest one.
+    * If you use PyTorch 1.3+, you need to install `spconv v1.2`. As mentioned by the author of [`spconv`](https://github.com/traveller59/spconv), you need to use their docker if you use PyTorch 1.4+.
+    * You could also install the latest `spconv v2.x` with pip; see the official documents of [spconv](https://github.com/traveller59/spconv).
+
+c. Install this `pcdet` library and its dependent libraries by running the following command:
+```shell
+python setup.py develop
+```
diff --git a/toolbox/openpcdet/docs/changelog.md b/toolbox/openpcdet/docs/changelog.md
new file mode 100644
index 000000000..b23b14364
--- /dev/null
+++ b/toolbox/openpcdet/docs/changelog.md
@@ -0,0 +1,40 @@
+# Changelog and Guidelines
+
+### [2022-09-02] Update to v0.6.0:
+
+* How to process data to support multi-frame training/testing on the Waymo Open Dataset?
+  * If you have never used OpenPCDet before, you can directly follow [GETTING_STARTED.md](GETTING_STARTED.md).
+  * If you have been using a previous OpenPCDet (`v0.5`), then you need to follow these steps to update your data:
+    * Update your waymo infos (the `*.pkl` files for each sequence) by adding the argument `--update_info_only`:
+      ```
+      python -m pcdet.datasets.waymo.waymo_dataset --func create_waymo_infos --cfg_file tools/cfgs/dataset_configs/waymo_dataset.yaml --update_info_only
+      ```
+    * Generate the multi-frame GT database for copy-paste augmentation of multi-frame training. There is also a faster version with parallel data generation by adding `--use_parallel`, but you need to read the code and rename the file after getting the results.
+      ```
+      python -m pcdet.datasets.waymo.waymo_dataset --func create_waymo_gt_database --cfg_file tools/cfgs/dataset_configs/waymo_dataset_multiframe.yaml
+      ```
+    This will generate the new files like the following (the last three lines under `data/waymo`):
+
+```
+OpenPCDet
+├── data
+│ ├── waymo
+│ │ │── ImageSets
+│ │ │── raw_data
+│ │ │ │── segment-xxxxxxxx.tfrecord
+│ │ │ │── ...
+│ │ │── waymo_processed_data_v0_5_0
+│ │ │ │── segment-xxxxxxxx/
+│ │ │ │── ...
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1/
+│ │ │── waymo_processed_data_v0_5_0_waymo_dbinfos_train_sampled_1.pkl
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1_global.npy (optional)
+│ │ │── waymo_processed_data_v0_5_0_infos_train.pkl (optional)
+│ │ │── waymo_processed_data_v0_5_0_infos_val.pkl (optional)
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1_multiframe_-4_to_0 (new)
+│ │ │── waymo_processed_data_v0_5_0_waymo_dbinfos_train_sampled_1_multiframe_-4_to_0.pkl (new)
+│ │ │── waymo_processed_data_v0_5_0_gt_database_train_sampled_1_multiframe_-4_to_0_global.np (new, optional)
+├── pcdet
+├── tools
+```
diff --git a/toolbox/openpcdet/docs/dataset_vs_model.png b/toolbox/openpcdet/docs/dataset_vs_model.png
new file mode 100644
index 0000000000000000000000000000000000000000..7add8edfc97bc6b1e4050a988b7d2840faa37840
GIT binary patch
literal 126450
(binary image data omitted)
z^X@dsF$@=km5s|YIZkWmz5RaYy@rBg0Q1V5P*DIS5u=UYBf$yE@A{Q714j_}qu00OR2F zAofkJJ9l8tH#0GHYeAYWk&i|8Fuwgq3F<3Xv(W>b1s2e^=oRf)>u;Uc*M0j8oFZS<^+HZCV3W zwO*gc#8SG+-DXX+wc8=1w7IJS`cN{8C(i4L-=7_N5%{@P7&+eq$GfX?`bv?Binx^S z~#RQ40h=+^6L(+_=860TlFE&OA#sUzD=Z@Skd) z*|SBt5wBSfW1tu9OLYYamV-+4tc-olW^Su>G+S0BSms=l=392A=I-?68Iy50 zaBVbh`2f{WqCteEjY*Giy3jrDf(`Y7uLoIXAulTz~B%AoAELJjWpf;;<(4ytYE(%x~&@yMi{oP29?v1)fg zv09??x-(F9IhwPw(2``s!_&N9eT@c2A_bb23f)P4U-$A8qPb8n;i;o6M^N=9jzN66 z*>I=L)*vJ67vOS;s7fRG5YA85FSs)>Fx@gdeFqWDO)qcD2?8EAjiIN`*P4jE0x6q+ z^dJv%QCwz*vkm!UF-l2GIXu+Hr&svHsIRW-r68--;n?*IXi28B)jWFG>~4el;Z+yj zAZMVC_%iYAM)QVpb!n;w2t#H2N@iVCb5vN!ld%P2l4TMa{25I1YzK%`Dpzffu*qCU zSKp3r(c%-$Aoo1X7pcx4+0RSDxdEN-dS9ZNPQ*cAy=sT4eXcucGI$Cm?hIrrN*cVR zhTZk87Vv)QP3cLKX-VX4IYEmuIAF+qmb>_UUC{$>^K&P-jZqlAsk~U-EuPw85@*|F z#~XOQI_!8>J33=BTDROS+y-s(ppclY_Q8km?+sgq%P^W7CgUH^XN{Yao7bf=#q#*E zrc`x$g-ev~t>BTc8QlTOSqCbFv*K34{kcNP_3?UkC5g7uP0X#w-<5NTQ9dsiH^0C6 zo8u3$XrUJ{;fOeBm#UIm_nF>RDU9DT7Ujh}r@6-$fm|0rTn=)6YOHy=ae4c$8p1ml z(^sgE#A$ew$yVoH0Kt1=7}nnO!=}#cwt7{BsEkainLM{t_=_oDKkUw<5aRwH-bz7rvMOj77cpyQMm$8<(zDtYNtnzXTI z(tpZNq*_{AlzBLRuxGi2+DfQ|6_2VS7{$msI^fu;o8E?C3$H(WgVpn2pIgN_miC)* zr}yl6XImwrEhq7HnEM#Sp0X@;pFHn5#~jF|bH>%Db|ZYn3M_`uNc`m?k}Xi8?$MU|!dGf{kc&lU>T ziQg}T2p-NgJ02WKpwuVGzgz9Cw0MpMP#q|3f*v4*3HVJJnvYF~C`{@dm1l46b}WMY zPwyW-m3)S32oz^_R7%di`Kkg3#{))VYaB$uyiq@T4t_5o=#GI5q(5u*-=Cb$Fp&Fp zmzkCrGk7?ehdzNXdLc>?<%~rO9sk<%n9A`N1zZP^BYE&ct%p_HO4A0rcW`jrM;<}9 zJ;*_*Z>GFfB$djdia}!8kSlnQZDTu+kdX_Hm3r(%0o_%F-MuUh8swr5$}PcHWi}Pu z=hakW)ukBjd@41kuRwhDRM-ayDur#)>z)%_?X`v|HG3q*VU#lW#o6v-XymCI>2H zboF>s7saR|*RmyH=MlL$w2wu(ga%=(bLv5pGJwXIPL?>kN)DcrSM#m!8;uquWLHN? zLyV+-uCoq@5~mf1{>%&JOl>d8VQcy)W|~wUZxzq0&wp)R*B;Mn1WywvwC6AvPdv`- z9?7`<#=vzM2%fM+l6gjbFA|b%!gm@3yojTG>$R zHi~{)Ww{NC4U5#_u8qj{HPrnp4`x#XpO`n0*)(NQx-gQ#I)5_7FU`N~2&NHa9Kv-c zy;jTPLBP^00YUlJiG#p9mx-fnUuP?v$D)mo$d%oLSQ{mx_2*hjg!5|7LM)N7`|5TH z({8HY71_LlLBU@M5cr`xrYhQHSF)l83t+JGcFZrj_C z?{MWhO0lEp&8$@Qxzgix4P^SPj-K&t0Yf}$c+})JBhf03NMrD)rh$J~*^R938G6xN z$10Kx*8$D}17q8sgyS94;F!5ceEA-VL?kN~+9L;3O0PVSrPCKfVDu*6Ef^T`Kw&#v zg-LS}I9h>#VX~@^H6g5VFkNrMz&g=6*oeTzg@$`kG=a0&#V6q`LiRXX{&R9>1ZxA@gpmB>7RjWGSkSQ?LvLDIN_w4VmL-~FB z(r_hrq{gkMBf%yOUmCZKpBJwr73JV!T9J;#fBVEclHM}W+1027Y+93|*O!U-Mdy|W zS_1UJ)mDtIgd*|cJGVL5WA=bH|76OVq$4L>C`!Qf`X~6XaJ4Y*5=Ov-IMMj+69>wU z_?exFpxECx%6o`UT~9{#4B&W~|B94Wx0S!{ zRRciZ8jaTe>|d}~rC(Vv84o{HLc9m$3q{t zEVW11iVC+Z1?%We4qNz(Ft8>*Ujaz{*hwU&T{Sb>SudB zhKDY}B`nW$;D=Ka$8(Ka)I&u-_samwZs1-%1s>YgswjTe*h5QWnw~WC9e(?}IGa5{ zB*M!pZ*l|DD`YQbI>XlV>b|1lzp)yai&-9FTtE2p`K#wl@m{;PzEl;h**hN+9G zn%__GVZ(Ju34^d(Yo&lD(_;tVAsWeSWC2`{)LGQr9Hfo@bf&t%k50u2H*j?9qM@Ak&CM1wh&_DpX;P}s4 zY#J{a8h*=Sq9?}0#UqnZ(ZY?;-Bbmj2(DlKydm_g^*U(K8KcCe=pTB4imkeSKzPi& zLPc{UPn@a17yH@&3W;%@hMhJ5AaIq$tzW5CMTFv3l1wbcv3viy5VQjyI(Qi0JlA5{6E z>(n=TKvrnpL0UB%ArM5((_MWB;Q(JBf31dtdJJ$cwUFt$-_=V$wdHYuWRP%aan(1I zKZ3CKurfDFHa_Ddo0pDVI&p?YU4`l0MaEl}MRH27AKkini<-f6&FP49ck=ufc+k}B zwBdRau@4as9~&4OXwKG^0uO+2ef)HF%(DpcU5E2b2qI>+78Y_*o$g;l&=_zswAXC~ z!|}_a)F9)_w+{gs5h3w~zF^m&8PY;;GSOr409v4EN3LCQH&QJ9bTmRL+bhs#v%xmP z$kIYDV92RC!FDBpHwuIs=rL5VWjTU134((jGx$xEc1#$pX&zuD^33~XAz1Sms`J=4 zeXZ8}P_clJfzsm^UGXtch8Ej`zQ`Z zd})3FO>h3Aq#h_kG;K`dS>ws)Q(fe@lafd{wlp9vzV{Z~XzuSS&nE&iAz&I37a=`a zTMxU6&z`%v;@dsgwimEGob!&M@$zAbkS1y&wKUB?3iYV`*$WiZ71~q!+fn-jEsLOY zMsQCdI+|C|7rkoN5*3z_>sKzKKxC2BG6PFXYHng6aJxrXBwfF7l6@tfCRM_&4P>i4 zci!WMcn5n^a70M!uMqS;mxYHVDkdY>FP-R^4mSWOWf+T=U?aX$DM9BhMlLz|uzthK z8xOJ(1h`E<)8poi6WZN#Xam1uXOJmc9-W^I1c0J@N-wWWDIk-uXnKp)pOaYppO+4W?@A zY<*QQJKW`=zFK!-#;cu0r@yyx%=w%~Qt&gQp% 
z`l&c_?b5}Q*W%s+$gkrp~#3aHI{va=vM0mLm*p|Vg;R#pr8;Vw*^H`Xyp*-0PX+zUzH z;!ty$dkJc`Sa^pE=5qWiHN-6kyA%iFZ62rI>?+R7LeQfg7p6TtR;7TWy<&**jA86} zth#jJNLeX=#_;3yEdv7sItJPR>f|O4s~ts%_gp=uEpD!-GAZ?&RJG#Ue8zC~H~^xU zUIBa_eO=Wan?Qd2+f|DqLq+z6vZ2sndPIQgFSI7b6BY19C(y3+!Gb|6Y5PAMS|`ya z2HjfcoGLzfVQlM1AF(!D)8E&q6P^=8?aiJ*Q{KT>c6hz6yCaY3-%-P8#lP?D?{7Tv z{@<7WK(hbYohZTaz%s}(_*|NT<4?_hAFFuqvGN530+I|?9za2y+3heb{p`dFLUrxy zO}aSoq(SXS=uY^r=q~CXk5eDAhYP{FtX(>*%G>7N4!nz-ekg=Da&Po?OF_ojLrW=| z%0QB~)*$sq4(bz!bcYf}xFI(izqqJLL5{>fuLY+n=}RXbdo+R)4jNz+Hs&? z@8s%{I)wf#=aiDa@uWd~%6I4Xe;u+q=ca&LRxLw}(i$rS^f^2XPMlGKyWCZs4d^Tn z{`oU-%TZ8*@HasI^N*F!|1VyObuwB$ZojJ-468v8ZVDy6GvR4CA%Hm(FEi|F^XZ(==# ztYBN3+xkY|^iH=#;g(OPNgk`na@4V^X<2X_+VL5US3nz2tOw?s7$vGEmtYhtTSVhp z-9>96LipVaP0Kgv0ooyAW1T)bOkOZjD~x+k69gB}Iher*MU8{?^wcZx*HN|5Ge{;z zsbw^6rm}sC4;y~_%>rw5hDb3Z0Uxs(hI>QM&A+PQB_17z#fq&5n(RDFG~`*_Vq08k zwdz`VUb8L^H}wK=WpY!%{QTRM^E#K&7gMQx8r#~2u=?G)QphW(fh9o()%ej)>Lk?T zY5#t_Q}n@v22&jHmR5Vy?wHx4r;78b)X834>+eUL6I9Cl)XxlO`Yn}@|Hru>NSXgS zd~k^a`Mm!+Cn)K_`nL`E`_upX(*M9t{IT7?Mew9IQ?)SkFu(dzEL8&;WtV|ooT&Xg z=HC6$B8U*S|9K=+fBpJxaDFR{gRja}2ZofD``ITi*mHCJd2KNl<2i>S+n!6224R-Wi%h4{RW59xC^+vp;$AB!?A- zN0_ZP&gbi&sW?0AmHs|AW*^Lc1C4(zHr{%!l-y;@RDazwm2$qcnW|}XrFv55*RR9^ zUC_Y%UQP@*uf9-F@G~C$^Yy>}>t5|D)gbkI6{fM=1V>C9n$G`Oo(TFs*6I4C;`=8h zgiG6?2-< zjb85!OID9E6szCf2c|e!*ROgG$*^uVNSlAQ{1e05c(fyG4`+YEnp5<0xxV34 z1)roH^1k#zr@`2H17`#Mojlt!!L!YuNDrgYJYQpF?w-{1zq;ZasZEN1!;YKIU&ns$ z%b%y?);m3}!hXnPJvaC32KM|9?V$M|$w7&QUp?*AJ9QdEMIG-WKCta{7i)GITpWLE)3)o!4y%)jjmXu;hY46_3GvvoaB;~7&2m;AsiL!)f+KQ8vU{6&BA78knYLFnesIU=z9$CDko?EWypW$Ps@MWa+j#im`ozb7(&Q`azA1)cgC+2yFF?pp{G| zaNsmYxdT7_ZlX9VHk;seXR5hva@!qigb-r zDhCD#`E{)l%?4SvEUbU+qc6aQ<(ixSe5GJHNm0zP>b}pr5~a;I9fc%jKhmI+6vXiV zqX=sMc~u43irC<8;g~i(DyiAgnfWcNCQWde#(AR*7}kQ%@HNZ)qBqqo z7HU@v`h*U<>T3U&+D)2&uIPdG2crJb>VsYD+jyZ-pCq+paVH^@2w3Hk%_0F3mxB=0 zQ)fyLiL6sS1kYVco`9-i_X?-*S+MeYRnT>o%8NnG(A*Sm7ya%(q$^wX?f#mFiv8|; z6|2L3$4H-+r{oZ4+4QDrtwmxdW)(%O7%NAM95WJjBHGh+CG?>ac8|k(TS$_lVf>C!3MF6L+lVq_exOsDX|W zVLKt~^chL%Lbi(2kSex`7STNG%&XRx^f| z9LorjuK({Ha)Jr&e#12dJmb7^eIs4BPV24!NO zJE>AY6x5P6;bZ%(R>vlC^Z0ayk!5;dIZLaH2$5&#z#?EXv$d;-*l0l8$Mt>E($bb< z7*wBIs=^|uhFwH^ooHigTV}g)CCKN=A6#g5)ABz6k^I^b7IDkZjO^LrX38WDQ%aX3 zBo}U2sCnu5CwwA2EbooO6UovgQm-IVcjE;gZNAg}FuT*nd8JB65+cNVQU(Kqp*;cu z+-sgNwQ06(+4MGJ>J4s0x6FRMLoR4MFg+!QON<-G+KRcxJOHBp>_f1{f0l6ay70N= z9Yz84Sg12fJ=siAUKl}|ogHYyj} z1%m1>BQq_F@P*$G0!!9w*JZ;clkrC1&3{9hDW1FIrqgUUJrK1Y#TZ&9C0eVrB>V1U zzBde&>owygKZ$z5_Vn2^b{MM-Ni5Ibkwy>moe)E51uNs%s;t0<8J&|e;o3TpKXdhvfmu9__sV^J7AAnx~0{FXv*(t57ejxK6W0dGY6taVsOw3OIeZV_)y z;ybjd4>*1sLh@^p|2{eY`X^EJe_#3|O#ibx@sB4{cflB@E3dSSKbP}#*uLrwG2Y6H zsqLb2=H{FJ#17=YAI*u|e|>`)|Jqi|IOkQ*M_l{6sriRTeZyQFnC`7|EP9uke;2*f zv9>^O|9|TNeIff@%^yFs5BOwzd-*mCIDhMiDWbRkk&&zDTtS2Fi`5AKzQt*EcB!z< zFlidv`!L20F5|5E@1O=aJ2lG_`8@7kl%3U~2_dbU^isdwS`T&CKe+GDVcHN>W7UhZ z5ynalJB6#6(!vD?+09yB?9tOO#+uVh|6aIcfN1h4J&NM-=QnDIEEh!~&XsFy6_>f1Kkf zo_aP+XPc?7CF3SWUYcxgPoC+_7W}?X5A~3L{%Xn&mDQcfqZ>8Q*~t$n&42mNX6XK7 zGd5zih_C3Wn_)nMHzd9EZ5qaZVJ+Q%2>pSKa_NS$nytna;A&w5eK@4T641QaW>d<2 zWF-IW-npAqwQv2e;H`~u&Vw0u8(*>HtKQ`8c~`w3@y1C@_C4Xf^Io8WeqZEJkUiHN zwSU#T{}S_)i&ba8q@G{f#N#^OLz<4x{BOkr?(Vy|le&{M z^E=k>0Ihi6>|6cE$%Jcti~i4H_;Z0re*ZWh|NGLv_VHi4^uL?)_kR3;u@k%~#ffz( zv31B86rdv*PLdUN1xA5$fjR!%f0>bgkkfJ zP1mh_8Fc6os7-vOE+{XF(8~Zs83f&Sb`$+zCR4#6e%z}ZaZUytHP;9v83#=`3zf$5 z+{ExU=Z(iC<4k}_;xANRc46q9By6Wh%~z*EEUaCc#C`SS=$htG)F$enp!@Hu*+ zxVY_nCpWR%vFqi?yW29_7_KM%SNn7u$-d>!Z%BRLr+4bwzWObW6$)hH;^lg}J?s)e z>cFt>Rt!9P_R|4@o6ZrsldNC9<$O40$3#o*gN9H{N_@upeOW?S-tK5n2|?`TYNnCH 
zG?1y(m_;$_>nEeJun_`)6tx{wHM36+q;i-(LT$)KZqRU{K4)Pgf#OTQ6v2iNiyYf_5P({K?f2C1!KJq|9tj%tV>goalx{Q^tdb&NijYBH`DSYDx z^B`2oJ&O4l;79MWFa6x_`mn;Q8W-i9MA!JyB8`mnyC|i{`}tEie!k@>pnX$R@@}GKeVGg*UV|Tox@bY7|;|;twv~*g2Pm)(g650dDjPYmIMGuB4T{hIJ zj*lev+S%Bo@FLr#R8+`eNZ8z#Vb;74O;8<|l=BUOS>1}$k#>)IO_uT_eh;)qKl5yN zJ@xFC!y^}d9I^?=R^xkjqtxpy@{+i#=waIkMPAobhm&=<Nj*Loz8!KkicHIC>zPU z!H0uob(mpsxN;QvjXA4f%DUIiNaTtdN5rQ<5?j2Jp{3^;!{ruaelP&OlCdb)X4ZOc zY?m7k5$C2!eDw*<%jYgF^te~4uT~4zv+=IFrTNENaYH5vjDaETA9w|YLuO?vk}-}9 zXhFh+vARlQ+b3l3ET(ba{Dwd0a`tbrWU7w&A}*uyfu7vKz0jV-2i^Q=y3fL2+3^fQ zS$LsPW?yl6S65 z&a5P!iy(I5BTMs*Tc;aE6*fJ1_}?eGQi2FMSAy`KEq-zZ$`(go|JIKeL5pTL@$mdK zW4Lsc2>Zhch8)@1;gK4F*&c~`hgp9MM!)HEJ-2(t4(^_Y>)?8-yu~%xl@virsyoC{ zSm<7wpe9fwsnV0kW9vcw_Fl-p4D4I1Bf@(usUp~;j}I8{>^MT%rt6J45WpF zkA%J`yy>O6q5Zqk?)xo#Oh_Sv46w%_!!K|H#M08z8wf$u@#3a9P70HfE}8Mh!IU4U z!S;Z)5@2#c4li_7saxKD`C3*ICJ?G+2YvZz6~{spkkoUxpI{c43>++Pv(hmLuicFN zajl;1fW7Cb$HzQepaOG}2+vA*al_<>sp`3W1577fDQTUx!L;;rKzW*s1T6+c1z&R#{Gpjkspk z0&g;%U~?0L*z2KOt;UD45&k%7dSEU+9#OYF--BFSbgzd-M&!>J7(i)Z5rU)4GF}!I z5-w#~d5rz}{4)*T>A_22=zZ@qD&!cqzXfP{_2FOjY{Wd8w~=&`zoCpX8Ay2ExI|Ho zVGo^=5*k1IPCA}>N&HFFYrZt{`^Fd7RS$?Yf{!~%W%+e~yt;#*$W{Ddbuu&6$bq^? zBdVn14nMgiZ>B?0{j6o*tDGWNW6J@J3#v^!U$ee@WvXYV*!EH0u4TjEOS2*!_A4;q z<5j_m@w-%yZ#q)HXGivJtP-1%lvY^4GmyqUbfWSGR&iL$yFK~5gbq?p6`BqO4}a6X7eP%e|6f>RC8egnQN?69CK_M zC1&*K;^4$C8&w5In4P+#BviRY5YW;Jxj$_7pB`h(qDgfUH-e;@iT3WzMs(BAgy>G= zpvs!Hu}ne7<-$*^j7@}?m+Y3ZL~vq*A3*(H>AR-WF~>zdn9sRV{o^$)xZlfLUv z!F+p6V#hmZL4qGWUi)(CDG|b|nc{dNJC_ILR1}{|8evih*k}S=G^Y0HZ`xunROMTV^7V3^mS00IKzzBi z?#~iD{rG5K%_y#qPi`}vtdZSgytmL_EK*o_zhb|5@LP69@;VM03CO$o=H}3&#G4{U zBQ*(lOcH|~6PUI7T);mp0%X+)}QI|=LEhwq(;+w+fU*saQ$b~HM zq2WbT$}_nL-Ir0-k#?5VZ~Mv$K`n*>yL$f6okVa)AD~E2oUYQR$nad-S{oyk;)pso z#}j7};QFQ)LXr&z6Vaqo{Zi_t*c_}^bXb(t%|Y0|S%7nF+93rGs_;yJ6hvX*^>1slAnn)COZ#9E5^%V8s(0F)=hCLftXOIMx)^p*Vn$!Fv-lBP=49Q zy7_JL9Hya@R*R&>t1Jj_>d2sG3gm2Rfk+Da7Tmc?<$Xn{<{hH^}gkTME@$JjVz-m|{64GmMbivo`#6g`jA|z_ zVuq7%YAz$jy+jxS8@nC5LiI#atG>s$Xg!FfDd#ww@_Y6!vT4t5`EX__nroz(At)$C zSoD$fwJ^QO=PJuja+t0Su1{~rs!^LRzK+>WrDI!(f#i0*OIVR;Q=eZH*zm-@&i)J) zR4;Bluyj4L2N>p>34sN%et7~Tl}hsB=JxktGTMVh%;3J0W6}3K{RA$9xre$W**=%1~T7H+S^!k%qg=*9x7DyY0cl}9@^Np{K zo;&1hQ7L;ylTAC@GBof>%=(&HYvi8ow~XIaBd8j~UB*cE(*$vF6^1|;bdTAPnF{ao z0tim+^XywqNl6dQ7Pj}kFF^#%kJ&eUNbo@}%?_DVEHJ@R>V97h!~4;U$(!!JSe_is zehw3d6H!=1F2VeKuEFj#_|&u?_QaQ>vTLJNb&k(JUe5`~Corq`)Pt%MEhFhjvf^fm zTR&>NP$jbVe2he$$b@LqUsh%MuX6N=iq)(s4I^0U*f626Ii;_5BED46;@C>I_f4P5 z=cG@_MGE#1NwYNosj84@ATJ{rW?>Se~r;I!^jrV6nY4e_50RYY#)g z^R!yEUNbAVos?SIo_Hz%v0VRJ8drCAwc#tOQ&ZC@1oiXLs?nj*Bt{w_DWj zc1*UT$p{B^&m&*_+O*9j@p1)mxpw7Z1;@5h!a=(e(AhnoG>LasMuSGz9eil6VArx? zk}@mtB9>F7X!pIDxrp${+JP$W`k9ec51FA|%h_8U$u5aKZl#urY&KZq_IuMA>k3

OMRc>X52Vt@FLseC*4BF4 zY-Kw2#y`NdIc4UY2ILD@Bvw56Gvymy6A^lmaCpRgM>y}s)ceOEa>sN3o7*Pyd#mMQ zt#CzpJaPDF6Xl83@)D(H&13#&+td4+;35}=oog?FXK2Hso)oAo$_G2O(X9q`IEjg{ zT(^a7@BFHrmSX1e=zONJnE!f?Sp24d@@Qb*>T&jqaYx?~A4HUEx(BZOubhpCNwin;EdMVlsJ?pmkq9oTc3CZabGY=GZ>b44iP--&=f zHMHB$WFOwF(O^FHZ6;`@=RS-PtiU~odW83IRf#2NI$lNG3mr*Zj?^R1OfU4bpvwO& z7>mEx=Qe?ULv!+k_h=eTd~5G%CtasFTb8cVu=`5fyZ3x~x{Zf86YjU&lOOs{K+sqW zfc%B=E`;Yn*=m3`c-hX2;v^8cfPsLVN~^^#_u1oj%cb50k9+!mzUOSrw*t0R7`Nf? zN9ynvy``;IgNOg&0%3R`s105it59wZHd$`w&S+wL+_HGb^&nvM7niYQ`!R}-ERvmh z7!cD8kZPJgg^sPb1Fg224$3AHBtl;85d@sFg-Tf@co@xWCCQ)?mAs(W%-~8x{4J1z z86E|a{y>&(v%U0PN}xD(LbkTkqL=4=VD#0RI-ckRI*;{I41ek1XI$69zWf^Pwjo%Nwz3TBQL};YGz#Rijk#R`bn| zF-7;6;AQ@x&qF|8P19%C*rg-QD?3uKRhvU;c0m2gA)?YtA!{lU}nqy0_~e3H<6a=mX8y ziz57;S#i0cc2S!Xf!B}@MDVkwD5Iy-XCuu&MY{62r@PS%EGiTT1xA*;pRd`%wc?5=hEr0L*#pW*myDk?h z5`?PNHbdd-<-D_>-OU!{oLAv-w3GwqhRcR~RrG^aSsH9_i%75bk0^zM!(;L@%xDaU z&9 z9kx4@p0i8a-S6srUTe?x2l^yeCruJu14H|0oBE@4()f=_M5(+(NB<9@S{2X%qVzvj zMV#O106Ag??c$p&%sD$$dPBlE?H^;_19&Dc-E~wS4G)ZWr6y{|MD|CuvkRMh02WAW zc0~%>V^!a!E(PTt0b(MIjK7h%mtBu9&$4~lD4k(&1044)eVYM+BB;qU0T+-1-*`nL zjkE9IO`y<432rR-MUu8LsR|K+SpMwzn}FlTCDkTzs-f(eoGfA@yN$Yk zu5Q=!#*@JAl9!c+D(u^$bQ2&bWs-+8H9nXroll1T0K^ePh0jN_Icuq40GLdIy1nm{LWrVM`urH;ttY{lElU*XqxDzELaG}?XC5!(bb z1hQXM>5S32Pkr46D)!va1THCHd-_yMo>t9uR^Yv;OF!pFfv+b&5t{`;7WfDeQyaLtJ-EPS48Z)1h!Dfy)3C&m{v z4>HSeF2OXg_ptdHifadOfa(@cx%o6bW1V16JSb4V?to9G=B76gl~jmZc%ktzA#*^NH@qzF(i0<~N;HBD$07Bt|Bq5`1 zG#-qSj~GfM8GJB&AR-9`j9rHpPw(SY{`m;b+YGg{!o+HQ8C`+5iKsA;y)cMkXhQ3w z-`W=r3@xn~50{TuRo#K{U zWeM5WI&MO1`lSbWCBE~OA?&?p`G^=J{H{c;!6Bq&Qm*`iyh7Sf8hP?@iN>sREsN6N z8sMK)P>cT^3(cio1=lFsUgV4hkH2TO?r~TaQe!TYq9PHKcOBl)(f~8r3E|gN$ttX{ zUI>T5T9@AQ@x!_0u~wT~k3&pFe`Kw=Mv@ab8G4x@>sMO#TE^Il~xA^pAp@1lD~T>SwiGlX`Qs z9Orv6mx=BT1+3%!r_zGe@SDH^?+4W2W?)cuvT;3qNT=JMBs#sy-$1oJ2S_y$ESw~v ztU=h$9#fIrVvjP-eKZxKqSoj8gT0}JbC-=UHX$v=1)gNvGA>hkq0|v7(ZmK!N{LDt z_AJVoW{IpZ2HE?ZCfmB5C`d{b#JjYjOn*nUz2Z`!Mkv(&u?#{<{l(V6nMbMv)36+uo~4OpO7)ZFJZH7H<(o& zmaHT*>-eN?V7NNsl9V%*V z`G1)>-UQIVxOKzwq!*%>_E!wmj6l1fbt%UEs`+ zU>B3bbXr3U%?lH!u<#1xfhw!AMus=tjsb^^Ua-yi>yG7$nOvm4G_VJz~jQ0XEZKbo`A`07I8tKQGC=*BnsE5I9R2cdX!Xs z2V_a>F%E?$L_AsRwK0A&1JqwZ%rjiCMe%s`#1S;O0|ywa4EGY)^?;}#B24Xc=XHe6 zb-X>kQ(x;IAy)j(xC6jdXQTZd&X*A0G6;RX4suM!NSkGJmFSQ5apbHSO7Xr;GAXR1 zRrXimqr4dX|5^%GU*jm?IM*t@5cBVz2*WU3`jaM{)9AF}t0^O+$BZ$|0iW+2@!z1r z703cv*9+C3O>XGZEb=Bdu8LzrE*OHDBat_P*6^JHunuiVZjwf0snh_UQH@&Q(vCv( z1Fh@W;!tQe!s-MC+r|-$g)ua4gmWreKh$VC7`kE@%|@=ie3hu=S=E$evszIBOXLpj z$8BHg$;iBONjzR#D}ynVlHhb2Mx+@j;*fgyL><95gu3RA_WWKz%#hjdc^HbnT)Xv$ zy!QzLn96)~>%X`U$Z6@|@x_C$KyY6z-sy#-o~+8Z)VPo ze<0y}>-fd~F@p(eWc60hC8Bo{^b!7I!Y0+DJ~k7D#;@MM$7;hk$5&2;37fuXCagGK zlHb7RKtW;}k8_@IrOakI%Xq>2Rv}j%NRgk)k5yw?+9V7!{}$;kk}r0N`lRt(rcuf7 z@nY+b9VFqRl=3`Jr{Ppe8CNV?Qb?a(sjxio=3ML~P}6QWt?|YYXHorLXJ&BejNnns z%EkF#bJcD4EsGVK<#?KezA^Z1z-}d;bMuRC$=h51+4ENbWh(OiieLuzW_9c0nQef% zKU9Hamd<5!f=#H(QpI-R!`@V3hzGRUoKvQ3hW0{T)Mn%p=LzJ*+vpl!5*!f-_%mzW zG{||Ce3`V+oB$t|9+p_rsN(F@O(^%PA=>g-5aaA^3Hs2xPq4}duT3?Vefky^#g;(j zC{-#STxPbH4G!ljS=)V>cFG#7dMv3^kW}mP9$(dUPV&oA73dVPn6la&g=w)R(+t__ z7Geg0$?xwFpiyGsuo`3E-4U)RRYdlGM2o*3M5>~nxhzCFkU*g$LP5hDM190wwUd3d zPTo|2e5&c{SCO*TWA#CGC4Nf{o3=+q-izhUNz0L-Zc7^*SgGbU3orNkj5dob^4 z=Jr9_w2B_-NvfwJt7kJfNR&hEP)0Q*024jKysWVgo2y3#x%2BTXF`P3PG(rXkAZh3n*DuGe^OjjQun27oHHJyr)J<%n zk}%C1DI5)NKMm^ZF2@VggG=7{Nh^jM?V`nlR4kuyl=P`&KU3+x{x&{p8)oU=8KSzuQOj zlEzPlCw={n4^*~cIf&L{^0^{@hxru4bOyO|AePlut2{Hvso8(+P+au(g;4qVM^>)ZO0BR2t0m0`q{C{fRF+#g(Po0c^- z4BCX>4J<`s#ndB>BdLxRn!+RPp$tVQzBvZ90D*lC{AyKP(w%suN|ln#>W;Yk@sO?jm3j znQyS{@n0rUZ&84HGUu`QqdLsgj9LDdJg@Z{Ukbzo 
zt5nrQgN8(EQ*Cg`w#gp8g#? z309ZfQug{eBny2PJq%Zip!K2)2!iC4;PL$HB3--J^lpAG_FqAEIE&N1>aJmEDAfDa3=kmeECV;^ zPC3h$1|C6PYs)(MIa{PF)iC4EQ*gPXsmHzxc7>7Pgc{*=4tIZr34;AOl&^;s-Jx8) zU;1DNe%=x1kHwz;ZI;Y&DSxA&jB&`*W*Yw3Dul6PzF(yd8zm0s9#xz;t)UbReSBid zU_pF3>iXm~-=bGt!_(mX_&9@DxKb0HdVi#QXE&yEzFWqOd5uVPv^69ptJ}(`{M++| z<9vnlVN>FHHUoLgAsn6beGp2eNa!P9U#dq|@15&ar|msX|8UEl=b-<~9$okK30i9c zGtmmSDi)O`5q+TkK&SP%MDoC=?-+sl7ysHohn4->ULu^>)t`Qo{1f^5;nbL3JK4zgXaMM^Yc#p>Cd>*?$q$o1f<+lL) z`S6cD>DRyehk|q8iQz2RO5W=cqn@t-?DmHVg1=uI9A3MNe!ch{>yGUjg zaPS;eA$leQi$!TJ-sXqZ%>wPvluYVd{OMKn+%Kk~9KvtChRmm9Cd*B=Y7RYLIzSo# zMp7UN4<(gv%J3hIc{W|DqN8T^x#n5RX~@nqYs`T%=1>4iN#+U4tc$UrrFSSCJF-(@ zi%x)_{fhRIo0Z)hCpN7f*p4KHY2Zzpg=OJ=fdDhDa0AJZ^<9s{3@$m+tZt zxwX<~>89PN=}F~1!E>QH!J4bRznP61KkO8%awTY zRbirQ3wkvDn9)^86Mt3kg-pI1LI%hpwI6lQ4R|DLD+pXKbTv&7^XP1b=?yz5XBrWF zdx8)G5a}px|ND|u!Z)x(*wVyKuv27yIM}?Zyvcb|4Y{1Z;vKwx_#B7X%og3Pvrox3 z<0kAQ?UG=y%$TyZ*UB4rzg#4~isTrp0-Y>2`(0!H9bs4ssaCitdAoa+RQf`-bCnsq zzjX6-jLfgP$cXLngpw~B+xIRB43)IuH8*n`NQf;!+oV&bvLZ|4m{GnH={0%Ouu7pP z6|IX+{z5zG<*k^#Gq3k{cWHjRL2d+khFg=$#AC1TH-m@ivI6q{D+qXwQ8R*cWmUOu z`OD(6P2JmFprgGPW&V!fENpx6*$I{0Ne4EfLR01iyy-$p8EG=ej&i^Y9d`GzjGTrC^eEKDexHsu{h;vwKLik->>eWmt zru7@%2>zr)Xo;Wju?EWf2YKpp6C4F!$QNgYEBHps;L6*de`e;bKu5@Q`4NjMYNBa zz1FBPLER;hy>!pZ$>AGx{g3cAeL5E9ShPO}PA3qL)OJOnNyBi|Z2hmRW*es*?>O~+ z2^9Ge>@R|!CQN!=excL}v^qBR`%vt=01cFhxd%nw0BaV6KysDq)S8wmoYP;cMj?XUDOby&zL){)oO=kA&uK}+TIkpFds*$(=_+ptt zx*^#N`cNMGO;1h1Mm{`P?I) z?NM6T<)-PkW-}vqNA_Ff+~iA}`$$8pZv){P}dvENxxelL7NnWl|Zhv>**kz#^}RnN58_U%O1^vk{$DfYcJ)?)c=u6ooY z4OKhFFiBOQ&wArQ!ef2uAWrz#mclsEJ85d_gyHfUi7?%KK)Qm4am>l^3SO6Qt>1dt2jz4%0&nzccrjH{Z?~F(c zX@6*vM`O!8%9CUq6&G*uaWm$&0`#vD;dr~R8{w&Y=I2Yun=owdpY{tg7n0BnZJ~xc zcVX{|5a;-ZiC5P+h3EG^w9(tulf&TEDyT5P%|wP11ym@rHA6z7UMtSacHlx0xKu(E zFmOPB@k}9=9RM?xK`25WYDq8({0sR;o8f?0 zhQS!;kWIk*eEU69<#{rmQHWq5$&8-f!e!f^7zTBy+kv9Ytd?G#f^B!`U}e(rx&7s- z{OWZ%oh|!7OlJvjSW8RxDTw9F$vc6Am1XGIkWhPVb43$GKTevXyaETm;aq>NIR$!* za%{EU>O-F%T{6x_h$8bWh}P&;D~KBIwgcA=EBgsPWz~L3f0j2dy52~<(G;H=-AQtN z8%0GC-T)^ap3$tMl?zi;h|BG`)2-pR^76>N6zG=1CUQ&}wjpYfz3Lq-vgsSUbL?I7 zK4<~I`DjMtTU873k;9?t=(Uy$myH#Y$9u;1tHfI#BFOLiO5w#xA%(P*t~&f8Db=@nh0O;i8M z_lzKSzE)vapgjpn^nFfJ)zQg2zrbs%6y(Rf#Xzgih(U5?>^y5({15G5spr^Nqo`eM(zU; ze^r<9JI$R9ny9(2*=s=_9k;D zjC>FPp?HHzw66o~}+xdLMQc~m(ig zz5IrzUSHTZlHYE4j!JP{`eN){X8egj<+9pr+7@xGyjcaIF-7#v!3%p&UFWu-#bPz3 zyW)9Gm*{;M?zhjxO{52g77GGUmp!fY78vF9kL(e$E=SOj*OUBB9f%yiTWFNyW}mkX z@L!hAkOdK6oR>B>j%~zw7*-av&zY~DroO9VKJb~69xE3p1$rb(FZf2vpVrg z;r+_xSC_YLgj5giYdSCY2{%LkR-Fvb-3jqOf2dHD*RcmT=^=WkA`Hd6{W8o;AuXz4 zG(I8vzv4g$1Ls7ZsAb1R=_#n>X=Qr0pk!XGy830ulvMjk@$)Vu#=xddG>7687Or%c zZJc3IEsh}q*F$1Z$Es;jDbu7jQ(su0W%l(ye;jh+Ilu z#!2N*hgoG`goA_vG|J#jxqinl`#CC0!<&C|AT@71xS}9Oh_wyI7*bj^x+bT+&ded7(j?>TCr6m0_ zI7FU|YK-Fq_l0gY8{<`aQDwrrS1z)2aX3y4vEuz6G>mCXTKB2TGLD?>zK1Rk;Z?KT zX$JMhj)8QG(SihjAF9i^B^ef9-WQU5F(}9781|z)iTbAFGHuX1@GtI%VHHiZT(6?M zq?zMLGy9{*`xEr~um3VURm}^DzV{!KaUQFdeihF>S}~Yu&lD(EuN_**&vf>03=fqL z!yN!=QmP{OAGUX(^E3o{8W>SVkJcls%dFU#jGoT;A$ z(1Jq;>VQF|76nke6!J*&br~|=@S7lR7-cn>0Ke>jCQcchlaSRrgCU9Z=+zDR7&c$|{; zk?gbOONv!nr7?aq!MN1lJn8JN`l^}tdp-D7){KY?atZ}PhWT-DiqHV2LxIE>42VJNs)8}4ME8pcg&Ii7S z);s~@nild!=e|{k!3|x#wm~SnEJnwJgAV{$ZrXgN$~dWiHhVPv-%SUBxC2{|zK_CQ zR-UBU<-g^k?@qA-mhDfDF2G+8j!tdwCq)7JoUi`_On*CXlJTcPy1|kJ*drfRPsB Hg|L_KVAt(Wn>djXft{V)4y9Ap; zss`?$gCGxO0lGQWtM^-L2Lc9Llah?JcYlExW*&f&wYqlr+{azni6a!qJF|WSx-hyJ(RT$pu@<|HWvJH` zTsDTEnhQ=9Gky+~4x?1OPnR(@X44Xh5CRnPf0vE)kfk}r#P(;DE5_jF5hvrCI#`bA zYVs5?68~_64knB3rdYLVpHRH`D&VN*f&hAz$J5Vh%sBwRR|8Hp>KzgB6Zj^v%D?t7 
z3p8}tH^Jg4;Kql`WG?3*tP`qK@Z7FJr!?r>L_k1N&U+(#yc787*RVmcG&n|?nm&v^ z+8DynG(blkP@XXL41ffZlV$$n`h)j$J z^K49X@4k;Wx$6z6H77mE0ya}o+~|PEY8&))(T4uh@!pO7;OQ2Q^~=3^w*3(+ z`_Gk=SLF>f#E3c)zwiF}-Q^Tp>00EAYyLa2+WMcs>!`%24-96RJ5@G(g$&jXuIki6 z5iZkwg^^_HH*e9CU|1m4c@ z_6IZcWl$0=4bQ3ny??CXGkd(Ft@{F?ZiVA;m10rHPe^K0W+OeHmxv;cxusd3l8#)6 zuk8pqp3m+A-S&do^CV{S`1XG&3SPhHF#77H@VqnZ-0VG{egRlQ<+@XP&one|vxuZ{}% zDg?B}s?Bv(sa!w(Ys?UEXd@AB!eio!f70HWhoGU{{Ue5xv>Z58%NKLtOAluD0?Hi2 zF$d%u7`eFs`>pG!eu|H(DINFtFnk74dTtE?_3sU$uyOe6kl(!O?%93tXoC^w6Ypav zMGd$jsfkaBY5U*>Dh*WBdPlyuEN7#;~UZ=TzIgm`cxkH|8Gv-XXUg&4x zDYjPmTg_@9UhFNM10)AtkDYZZ!z*Nsdohx)u|k?Hl}w^WVTh@(z}gtf?8_s%^EjbK zxAgzSk<1bz2=?TYJYbWY2fjMN{VF#OtQN#lKRgxM|lt-wNy9YJgSKy@*+wFRPMTlUn)3Au7xDT93;~~-82^( z1o+1ObJ=OagbY#(el#KYNCZfyks93Nq+xZwNGR&4Hlg+n7jY-R0<8Yz@#s7ct~I$} z-ALta%{@`~I@BV9{G?W!($3EQ$@v{XKZR++1)+`NewNhYs|dhF5gTfl>&v2v7jAzz z<|M^A4fkCOyry2r(G7ql7W-Ml=+_VZ{%gmI4>W=d(W{V%}Ff#-(ek6`a6AtkR*4~@AP$q@5TuCL@ajVn0d^G+` z>LMb0i~nE1MGgYW4eO2~-xU&Lk^r*4QFX%TeK(%ZnHMsSkC#Vzv_QjO;R7RR^WnDo zo5(8(7DkzZf}Y#tu%dZJ*0l8eW1?aYE!x$kYmLE}o2tsu%WBiu*Rlks`a!S1L0q3yWF z4MWk0=DVZOK;vX#jOS=WyVa;tAo{sm9S|=fFQ{|iuL$roe@}05y2dM9E(r*A%PW`K zWmhXQLeH#v9!yK9)(+DQMY+aSRAR?6AUCi+6KG#vJlG4wsC_uepi<;@6PP8gi$#jS z@B`QWZy3nMNi6hLjY@CMI|Yb_+MNFczAK60^Aa>ou$l!QCKhh)pMGpoFCwvIhEpwr zWmtV8mAlgqe*HuH3R@q9THoEID5D`VFW&PZc?uf7`tzr zd7T}S=7e9@EP~o39bio~HLeP@UMngts}lc&E=IuthFF;2kxO0Ly|0>ld0Kr*a=9z5 zUs)LV^|}P0HYFg5V($KWfmsf5ah%My1pMA_lR+w$ldxl|?A;2ph6!3mJsC=R+vk3v ze$%KUtAZYM2*qY)bY*39=>qXR%I1e(uqY=iswTxhug=}Vn>wq!QdIu-_`%1IRW-Kk zn#bz(<2!v=`o(jGojue@ozv`^So;ALZy|9XTJoK(vMVK4kz&HsP<%{+ zDqi8!$duoGXyMyo=tBEI^xS@N?%;WjG*Sv?dt{#I;eDFEc-!+rrpaWC_HMR&YgY8% z*@48r#s(SR(zX%#V|%bp*n=ogsxPL$p%z!C64bpkJJ!Y{US#~N%4p*iYOiHx+q6p* zYhy@qzVRFPzggz?0DE9$Fm8mS%OA4G$ zYx%7%%7~LKaum0ke((eF1z+jV=VK+&{rdSLc&D9~Tpy)aL0WAh!3X4cBHGC0cSVEu z)3~{&_4idbVOqH_xoHNi418lQCN^WZf8BHDjy$w93Kv-f7EN+%-^zDNkZ68N9?U#w-gVAPx%_xh zA4Xg>Q%3jBv4A?_eIf;=2Rolk`WG<@`=n?lH73!vUakU7c9wYJr1G}pY;z3S^}O8y z8&!JAD=z3NA_;z!i*qDGk6S&D1SE%dv;lXp2C0!Krv)*=YnwgJKOnzzMOUO-Z&w-@ z`|r?iT*>R4MCJEUxi_f9m?=nV;qPFv4zE_JdoEt8Y+zR@9q*mzHM)+S5R@q^)i)Gl z{fHbbu8om26HkGS%l$gri3$CoL>5(bxC@OuM;ye^{`VT%g+KM^jsB4cqw{~UB>&f! zQQ7r*t~3eM+2@kzM7JPHf8{3NAx+gZGW}gHkSFyn*jbWt2TrPpgaGaV(EVb1Llm2x z`8`bBe#h+u`Zv_}vJj|LFu8|t!cZK@;G)bnEn3v4MI~6aUa53$e(m92QWyBstxx#T z<8`xFZPjPQmzW?=pFC$6-@^aEx3FcKQ= z$7HTgT@35*AzANz!)=ezFD*=MtX z7X}{!F-o~?+nJG&wfjKe&DE2(16G=$b@s9 zi&gXZ6IpvT#dd0Oo=C#@!M6}soK$qbuRE!6*KxN9k=%)=Z;!QMy9~@53@1IGE^8Gg z<-+rJvypEHBG>?`C>}95q^H}1a&_P*&>JcL{A2D9F=xB@E-+8ry7MK1i1dR%??-bg zUvwI;<>yablog?H-M7URa>a$`pVq@MbJf#msvHu2vWVIrrw~~(L>O2sf*VNO zv_GTX3OXw7`JlvN{s!2Zfk_;vSF6g&3Hf(%^Pj6ME~J+{?lZfVGw`ui4PDV zePba6w^()Xx?!xny`TE@0TQ!;SUYYDVh^F%(Ikd{47d)_%2+pzDEeJ0g$lV_Tc?Hs z_aGnDAgFO^TuErIPj}l4>n5fFQ*zkoC8;ju*z z1bU~#snp*qs&kxsIWZ$F=#?vDH zs%}0nFO?^+IvQbS=+yTom0DA<2W9b~h83ei*Re!xL!%UZ+L%glmFMCOel(t+cH50= zPLDtVX>*_8!^|S4P>^f;bDau8W&1#qv3+FYh03s&)6UWRR|Ae=ZIN zE+szaZ~N@*HdZWNr^isQfiY-WW`m6cHqUqASWReej+YnHJvDc9O{)Qtq;9J*p)hSY>Qo}FJ>ywiAD#H*HEQddTtXXglxnhlo?+bWN4j-X|xDB9*O?0!Q!dPC( z|8KvUxFeAvMj$R3waPs{`rLj27h%4;%&E4wHx=&%8A9v+lPC-&>6_ zR_96%TPjorMDr78?7x&ut2Lda(bRQ6f|RLautQ-*u88dCAmJfG*Z(fGS;SsYHsaWh z)as^BS_Iyk#E~z7ye@aQu>e8u525ninpZs@+JL*{FRDIP#Uyu-b%;eQ>>)9QUL~Ae zJlZ)~w*zKqHYSiPDEbY&Q|Ze40zx75M6Q- z4Rp89`X9+0)~AcVjfab8YIEm2dN*mci`AnJx?k;MT)KV}M;Q5Z37+0%&2^meaj`3! 
zT9zX-=G9jz&cTRk)tie}#tUDTjKQ+lQ=QJA3&r~5_}&Ouq?v)-gi)6DmBaaBgY(Te zb&(bKc%@6^t!?+s6>&XVZiWr5HP<5nY;>pa9ZxW2nkUww>g7aWsZRu1g{<9U3P3Ju z6YH%b4=9I;1>X%L8ybe+!mZ;0r9_@Z_V(k$*$^6TKlnuT0SoY}k2U~Tmdc{#I~>ex zKO+}gq5L7vKpV2<&9+3sq(gzP2j(TspWax==qz5#A0kOa&*<2^C>43{`R_qrWpIMP zu+aj~)T-D~+2F)RD|b-F;a!N&vztdmw8D3tYuM!vJ3psOX@)tmJScG!wq-Fx?aUt) zqh$3S%@NWK8Qv0Wri~#P2qVq!`J&Gp$4jN0@U$c3X^-rxr&?SWeUJZX#C0BXXm237 zcUR7OzQT^mrqL_4yn2jEWdkz?Q2w|Haenn)Xyq-8uXE>-pJ4!*-)KcS5{{IOX7AK` z9C^zR=V>c-OAs~}x@(>`fC-feHXy}}d~HX&AL>`@U(vd~X}*4H_I`O~vtFIDl!tQ? zm$OL5T>a8Oe{MReUvbPd%A*vRW#a@lv7dE$UP1+R?JC6UivLc=PU7K|AFEu2CjP?J zqU&c~U1%|_Rw~jQ+-@2vcN++Uz`T*uH!KU{LRy-;>HNPpQM7Nz?b!w_G=wUv7NE-5 zqFoh&6D=gvnK4vu=2}}If%iI51(5jGm$dhf9YQpdzR;DrgUxx7 zrSHsyT?FwL74|G3N=M4W_lJ}x$B#0@O`-JtC|faMe$fA%ks;UHXqoHQC}=o%zL!dD3vIai*=GaNXd2BTv3%CnR5Rm1FA1lgZ7eC&e@D7Q`R2 z>*G%dwdnWw+ z5Uo|g9*Jw-l5C9H96B%ZW_5CT-Jevs-~;iU-`|b<+oEzY|F2xaY&G5V4Bl=Q#fBtH zt9#t+E-UHhCtDAL(pTDXCAEY2sur4_fl7s#N%a~5&$5L=YgSh)DDTamwb8jB_YcZP zve+pjV>aGVIIi?Q&cZSGdRljo!d6*veL(GUo=13joppP?(*=h@XtM%aA(jost(?({ z{`i0voTf@qySP4(`$B|V(Uq{9d!c-+FmdXDgj4md=n?j!>o@q->rv8LNi_5f>~K0T zY#h^Qx09?<@(!1Ej^qirQC3Ly4Hw;$@_?gn8wo`>LGs97jXqik98AiePBfHys^5LS zzPUp|@`Xhe$?}(24Gao1{B=Ot~AR0hr4V{5h97ya{kTMntyYxn+WXO$DX+a z_9`=&t290oO_MO^mXvh8we1t_xZ+H=TY*+J^||FX9QuGR6L8{VKU6*FgrtYp1EltQ zVd+{sLKPk2Q{69^NW4GpFeoz%9=efn4Kq4XZ4cJm_lFy7IB|ol2G+J&)*~_FA9EAj zNv&EM#Skpwrn&oR+^-Qc%$w_K4Idi_Y^SGJhmM>cOU6l0+PYRV#70gzYpGN`O+O7p zokTHvp4)5y3(!FUS?~dMv*tI{t}NWyI6XwNk3xkzf1^ zzsR-*KmELAy$0#vLL&L_c3FI1+7!)ikAsDyRZYxsRq71Qv+qH%uFFGV@h2+uMFUeW zBXKgnEGQpVXC=3Jo{C^za(2IKl6<*JdWD7j?-r~JLxj+alsbN(-f~NB^L|o$>kYhEpnC4_```vm8IyqpXMMa?(zlv86o zA9VU;S$Xs9PD6ms`z+BGfCqd-RDM4AC=K$3{zzC|fg_lFWKeAmNDz7601)4u*^3i!QxG=6L6QNvJB z-}^!b^`7$RrV4AOyo1PY`vbu4qBf}Z{b0DPP}-zOU3I@n(+xTTtvwZiu3sWe2AnqQ zLa#9JkyeTuv{!Oph;Ll^>+BhI4tEI@KT{7B!AvLBw?sD-jR*rgUu1^)3aP~K$Mv`* z4xbI6f6;)K1N!8fLB!D%Ha249_{R#KILpZuaNM&}?(9NmP}dT3SZs2e(>IOuH(bDG zsYEIp{i}wV;FP5SUk^j4e)c$N++hzuq1y`9V7Wv7W~inSsVp2XA>e0|?SsWUzn2G` z4fb$pgS!P_%{J=xawtArA00L3)J3+SCXTz8Zy6o*^KA{Qrv} zR5v%>TS3N9s$8IJYq7y`SitFJn3J0Qi1F^nN&zrm&YI|GioE5o|)2F<|Hj^YHq`SXtEkGU519WPjRSUW2$IfEXW zwv_UX!7{UH`r)uIW=gZ{juzZlQ8Ae&g7YlBTUsp3OuqEzas~)4)2Jm4?=7i*t#fBK zcwY-Oeh_Ba{N{Y@0%V?7r;pXpWfQ-bd`6ya3FBPwIOI3vWX^IS9DzZX#m5y!%O{EM zry6Pz3Zf&_uFd{QIS(C?|785gLU^K$?&RiI|HQ8&KFZ#usJ7g^QG|$#ntqlqmTxQR z4{@L#Nyr}vwnjg|x0|Q9r>pSxL0!J3t>FY*;y@4=MWEb4eIw|mU*)=BnvcT| zKdVYNAe+W94TFHv9gL2G_b(Y4>?SEu22#l+yDSew_#tqj>tJFb6pk>K+csRFkRh{g zqL6!%t;S)_%vyh$t+&##OWQw}nvM6@yhm{_B!erk_wO1mmXQ zU1MjWc>bt|5q)m^&Zz-}_p%Y5yY_lqEm=y#|DE4IMGJZvqfLHre`BG1PoUlf_^iyu z`KgqDJgt8PlPlFl9>KePBElBzL5wMDh~SY@m&pvx#4(prH4Sl?kZ#{@7FcToZ4pER zothB*J+>7;=B3$~<^I~C35d6B%Pk!qsct6v$n3Y7m{r#q(&t?l&{<~-4$ksgyOt_k z!le*K!sGqNe9xqQ7lC1l$q}XroW~8_9%Tfo>f|jTdtsCoVg>nxVD65>u}5h>OR=i% zJ-li)OaV3Mar_CD98+fbff;=miy_7wnGP}oytiyYPCP+RciDs8Q~O;WTFN58I{fDD>=&#;tAImH+NW9J zx=uVSwKo?lAYnaNWBPXl3E`Z3dlc67T|r4zyu)OJP0!?~I{}q@5bB+;z46GE&=8Ls z+PI{$J+AD3rc9~6p4oUe6A!LGEO*4|veEpO0@vurYLYUf4#${0Mytzm!AT9oKm(z+gBi~hhyYS#UBtC+V;-oJtKe3KY3Ana_A{v z)Q17?4(z)JkkrD_c=8Kh{v^>7cz#`bGj#!WBn}53}G8v)pghg^MwykhzN*^sUYQFBm*9pd9>eio4)S7DEK~ z5x(-~eq%i>PAP>;&X&J#y{N0h>xNllR#d_Qmsl zdyhTVe}-#$$92wg&SQ=}(cRYiimxdf)~-~&YYRZ+|9(^A>X1jt%j5bqb2a-tj-Oj$ z)bqF1QHej7@lU3M+ROeuFSsm@ScJKk54^yI9sZnLTay0U72~6=4y)($#pE@E6CM_J zYv75~Q~I;Mf{tIi`|0O6eY36NC?TOVCS9o=C;I_>a~Mo&xw@KG2Mf)=cI`~o*d>K& zdE?=sLN8f={yaAX9j1=3ONn}YJksoC!M_%1%v8-^$14`e3!&haBRTMO&l+DU%x!K|3+XPp$Wa_@|%3-#3p zh>JS_Y=^PT9@~Z)iZT`kVRhiHVLLeFJJOc@lYKGY@!Q8rdiF-nE$f7}BRl0=|1Zzf 
zo1sS6pIerH#7wFqlBw06C)$zx{xH)C42@_JSCE_&nS>nlQz{hR_%}Ft=z2~1GIJXLUrQ17RAy7~>(zcQY)yLJBQ1i?kB<7h`LG?C{?AGxr+f+wDN5T!kNq zC9<*-B5v*bd6904WA;1XGb9hJ6pT-=nOg7tHI1*=s}0L{y*z0r@vpSy293g@OF*U# zstV@t$oddzGPSD^A!LkY?hYM&luaxQ1`m25UJ=f%<4Tf*51Dh*8yE5$>`$*jp8U_T ziFy($$DIZsoY&vOOpNlj8Uf+$rmp;6^MY?`WgGfxA4PEy{Y%fb6IhXG@^JM`1G+;+ zyZb~J<0HL{C%@drT78DxvT|b$Rk&kt3kz4Qk9f&<=zhJeDb^yyI}5f?bh zc_`O!_7b0tTPUu4F0S<6Kl3vopCaHS>ZE@#j~y%z9*8geR+SvaR>RT=&|4`ZdtHnM zKX&%kfuzll;u1bT=&AQ>3g^oo;@_Nj?pE-B)?7(I4ozeBRBfDvYkmJjaZ@jMv;h@( z`3ZQ|?vi&cPLbd{n`(DfzezHT7Q}Hwnsa^-oJgQWDMdSv2ll zA66;|uUf>gpyVE0s)pS2jm~m^685#rQsi&pxWv5sqtdOau4_79)l%c5g2U>(#wa!Y zBfh(&zVl0ld(Mz>d)78*TI?P}Ld!R-bd*9}b*k+)nrhIhUIkgu4g3<+e-~@bmsq95 zc?x(?i)EtQoPj^*T87i(9`p(E4(PdVzxvF^5D9xZUK1;c|6E&-8AcJgmLo z*!TB~)=V+U-TigiN2q+>4k?3}6&Vk|S0;YHok0(K&B_r&AJKZ+p>1yyoF(DQD?ef5 zCC#V)V)=Qlq>mn5zwl3A29`l*3L?W}j>u5s$9xXW^!+az0jE==SW9(*v!&6JK3-_(Bkx{&csC z%P+uu%<55q5s6W$t8tzBZ~V>bzdwVc){no8?0r1i)&?C{Tb{P{KfsLwL4`712_xS; z%9eh=DHM%JUuA@=L*ldCqU1VxW8g^W^$1zn3t*j!NJ{PcHj2ei0qrZPNs~&OSf+LG z8u$D!u*5Y-%DVbG(5LvYCA8URWJx?eo{yit^YBkdggM^Zl*{2=oa3GO8!!^pFoqvL z%GEfy?_H$REVYI6IJ1=Fd@wVtqfiD4Y5GG18m z^b3!!>8icXsWnrH9mGITVnal#<}W}E_-8Vi@Zq(qmFx1Cu#M68N=};|Jf3d>bc z`TE)1F56&$ZMc1fO(gKDP$eErmO>|+derZ=We=jbg~DCyWm?C zH(B61caCPcZ#)H_NbkC&F<%u62x%JK*77;=y74Fbz}#Q%Qyff*s2tAGjke;P@4iwp4t6P|zA9U}oCEKJHX$wt zv-4m1yO6+2rz(o*-L%-wQ~{{bG;svFKFj$H(I}l(SJS)A+9&HyGg$#x7E;8sFgFjw zdmFOEbH!I;3*-QrX+YOKh*OBq@TFBtlvgf^GP+p$d z_ZaeAaE-QRbM&gTCbRJ~N&#R1{7D;W!7cigQYwN|$pO0RfBU&#%X@T=Gwl0$P;0VT zrIc%TbWgx*F5;PD9wH{WCTxCT_gY@idlggkxD8tv9yUp&2gZqNSUy(0SbZ$lD%gIK zB@RdwgMiHhQ2i8VR`2K6>zP)_-vLlN35|@=>tmTM(Z{xXfr!RX4QClKdGZAQ8)Wo! z){%$9^R|K;#U_(q zs(2O6%ZxM~!F$k_He=hOU#rxsLI}AXBVaU{%Vg#vQ)&dprY{ihbVm?EILo{h7P_3+Z6H(NC z>GyKu=j#1a##19^zj zQcPEld!LemJd~B*IwJ~mawjZQ7c>$^Um`5B;s>y=0b|z=U|grXZn-`_RM({(tRH|h zGVY?$5zwmB4j7yYPe=S!CBie-k=~$4C;WKJw6?%IggZqF72M(TAjY`=>cqX~J(FpM z(3}ecIaxg$lFB#1GBA?fFYc!~4g>h-;641NrwL$gR2!Bo>7!7D+-g zcE_F6#5hKXp;u>4b@HCbJ{!>xe7dZERcEKbYOf;fEk1dfdR0UJtn*IRkvFK(v6jrg zg=Qt%B)&jO#*z!|JqMDw~%+TM?T4_#?)OdI7rnKkD z@vt!+&Re1WIKuhEnEC=N34927SOd#b^dqS`>=*B^5RyMa)mzQuU`1q`e|0@x8U~j7 z)ZR?(Ha2o|Vb+(tAe2O~1fcinYq!qP=pRAPC)|o2!ee;&T#5~`8Srgyx1&a4I5?*8 zV&l*7r4{ZF2kHwYpZto@67lZ^2DUv1?aksKtDV2!ID;V;tYYCn(`A+ae1hsG&DCMK;tnfQUIE-2dR=AG7(njLTPfSFK2w9 zU)h3HJg3hnZKNO_p+>Mztpb4|4VZIF=6k{uQ#8j90o_WplUF>aZN{fc6NaL3ZbTLa ztf+}plY7g}P9bgz)@_%yVaz(`hRuIJB&;3$E0EzfKC!%Y3VyXJkXawLRf--10@y)66y zO?sn%bSA49b(H}|(R4@t$PSot-Ob|?QaeJNX2M5EiqAkDmc>7~S@TN+o6KPV2-yaK zH1=N19SAsm(f{dxN-zo%A;xH}9T`(HKAXe=7@q-8K?=b=*XADGe+E`)H0+YpzZHtQ z&h&FwKQ)w=`>WMVCY_~w2|985HH88k0}T;mnNqfFL%}5M?X_)Thhm@iCbGwi`5q8F zN&jv5HUg5QqM?H!EXQ{L_GK3|YBZwPrmVp*J$?Yc`y}6uh-`6^8Qld^+M;1kEsO0k z=N%A=$HrN3IG5|~A4LGmrHUCe|2BUG(9$BCQ-;;|6vu%hP@#2O>)k;y3{opo300#( zk7VMn7)plB*L0|)1CNeEyY90_#v{PsjqOzMgsF-%5M$WW9#6D>oSr?^@!c^ch;lMK zL|)_+u&S=ZO_lIyygry69h!(|^9U2-wa1Y`H^Ib3;!X|`y>n-RSb6xU=i#$PV_lYV z;=UyNf61FS6kUyUou{fjZ&H6DpJ)%d{TlzY>T{z@@ZFdX29~m;RYPu~Dev?&6qLJH z?lq+87eCR~=ETx`P@MSW#6G+xJuf}V_l|(*%!9H zU|x5Q4#!SEzH#S}`Sd}t;ZOay%g*vznR|zp@{Ggs8LH;|#o#X4Lkb+bYyy2pNIwp> zhvN$5#W^pKACjM-h$n|xeh9jGgDCPJ?+1`1(d!w4K(X@Dti9j-_k*qV^qvI!?P;1x zNbe59DYa5jp>)~^*Z||U!v-D!46RWP7pG*4#)t?$USZ`qv0803^d$4?!RAa-9*`>5Q`cCpP@KD+JKyk@OX8BiR>)+=B)% zwkgZcg{FFuk$Mb=w5)%3^f}G|cc#@lIUs<01pFn3Fsh08#P+!~gs*YUx3zhz1?VV@ zo%wS4$GhZ@-+aGR$AUty@Z?t)q&Kh$NTRa{;sn0s1O_Ud_tKv#nS>i}!=omh4Gzz{ z95?(Sp=BO$)1aCH<@&t_cJ>ZNajah_$R#N5pn3bQK&jgt=X&9$w>u6bUp-Ja0&#@q zUiT=c&F;4pea&Ekl@*Nn_0AFEG|XNHr;GH_F$Pr8n&;3WOxe2uriEB-jfD0f22u}> 
zbI%iLgKUpSzuv{*f9tx}pk%lxMAW)1{6`bmpX3D(J+FzN+f-G|z-aJ1Ww4=N)D z;t3ZOfyR=?TroJ-q{JryD zNPRVcXK8m3$tTWj{V1|Er>@j?;niSvUhV}S93s9Lh$A@6E)8G=|2<=~0^55=K0uu`FANZ;Y#Qz1%evNd*hC=j_lS~EqW9E>C zL%PARx*W_zSQ_|&Ks$Rz_YyIt4nx;Zh^ZDAtE?j+kgS&LkTU!Q6pdtVCx;=z56kvT0_*g=^vepAcXU*s(MCBlL+^TFiAqpy$=(d6m&WcoFQ9< zjwHycBWK{if=K!KbIn0aAsRI>`vCFU+y*m_EGG+EAW#3u*}yEkXul;H12-jtEjJnT6U)ovA>SMh3}wrOP1kwbc3}t#B?$%P4%bJo`Y+H|`^Sa&2>tH(~UKf(p z)p)MBryVKlp1^n`TDF4|pOGe*BmQkDB+(N3e63CIsn){Y){$PdoKl?FqkuubS;yD= z8TamK8v+0L``Oy4KE0+Oqdr-97X@~i39}x(15*{tOUzo~vG2HPF@^R>?g#VHBGW~V z!RGEOc!ct%PAL<)Sz~7c5Pp&M*hrT*dFmO`h`Z~3^%+0?Hoe(bm4f=$#|Iy_mI?)9 zuN|>Tsb>Cv@18R>L8ibqv}PtszHI{P9QS+=#byu~u>J#9EQF106Xba%L@2gnIQ(f1 zPe*4WA(60g%u0$>2Pm3vtzYY{zu?Rsh|Te`R}Qfi`ad;ri>{8`=f1t|L^)qwblL_Z zN(yjshz+Y2KzEPQPU?H^7fyW_M^$DA;_}UZgk(_op!mjHMdR>YTUC}GT^YtAKw!v{ zc>_Xajr*C=qU#@<3c`Zso7MJDC;XP#r_hYwhd!yhr3IlpE3B06@-;-NDL<0;aP6B=_r zZ?0bP$v`a(dE!qa5%HECr31)pbv5m|d3ty#uwY8$a@wUdHA<#1^owVP0zS=;-^M_p zp*33^3$D!(m_~?GtzP7Due}oLY zC@4Te8QH@q=Ddnb_ZsLnUb<+H{dja;~aF_DZ ziXnV5i(@kUvMD|j*t$h!-FTYLLmHzst9zoatajCA4@5-xKy9%i9Ks;;;X~GQ1OuA- z4@KIS(u4f-$KvN?64YDa7NRw+4v#MZ40kkkH$$M&xB9Yr$Q|sym)aul=qfi658w&r zL>N@n%4$Ww#ryWPqe%knS~5F~PHq@p*N2V&IX~<{N<&Z%PV?;)+BVaGB3cig0NHo( z22nvs3|M0<`fA^|tPF(9tyJDj)7Rg7#fj8uoR|s&e$u1|_tF=8Q-$$$w!ZbNOyhN1 zH$VRyj$1gaOx`YMMUQLe*UEc8LlMZv5qTBuVv+Ug*}*dbr*AgT%P&a*d>}MjL>fJ2 zHJ+)uc*l9ZYM!t2DXDo28;Z+!;UbJ)8LfQ&0Sm6ZV(mnB`?&EWzR<5h9Vu#J6 zp|9Ox93Yt`C$j5#)N>l*Ip3743w;TRjflhV5}gxN5{)^2b|)*+fldq^`S7Z6KQI&nO=X_ynPJoD+gpw}nFG>6`;%8N z2EWg}hR%=|EG)ydLnjh~|LsPRb9W%DYfD~35n-l+&{CxXruZ5yo?}?X%PO7XT%tF z-o(Aj|1nGeq^7;xgxo_^WW3x?mYW41zh=clQG}8U-c!8v1e;vf_H6HM{qJ^ZL+iqR z8wJUr#W@OzQ2EpHD8kF*n_AmNbOt;sF@t%wOKQxIs5C7jx7JhCHOcwah(d+)pLcVs z_PxkzXdJ$yWh#{&I$`NOkA}cW%x;@^TKjKofr1#4rRrmn3%`|daJt%0C6f)DvQHpX z@l#a&T;uW%(PFhWza0_4sWL8d;AIuBGo?rca+ip>v zyXZq-VBE#8f!V+O!5cjK@paYoIbY3;PYAq#C)z_!-`5TXybK0pBvTTf%LH2C{Er?$ zlgkRE%wdRU-ZU-z82k>oAwDIS-z1apUPm9_+t1wNqA%kOZfD-v7wbTp{yhY4tjCLK zKa+8i!-83VDkY0D#PeST@H=e=0_gSWZiCrwL(+B~UnrTkb)vU$LDf22RqIL1XksRi z4m<+t%7tTnq!Owd_$h=-JCwlO%7Ct!kR>Pq#|s)-ltz?Qx-8w7&!TTsP{`}ecfrDsz|-}JCt9_ErDZUK~TI^s;J>c>iqk|VyNxgmzv zPTBnVp8e{ociFD8)W1CjBHD{N2eHaIZ>SIT>ffb8*P0ZRB~1!e#A`mO{7O#soDx@g zU{}D<);dSp5?MVE>$%TNRQ!HdoPPrNb4*}UCHUq`0MlDIDLiQl-wjW*^A0V1#AHtV zr+NSZ;RJ#iG*LA?P-Gh$6glVu66>MgC&EbWj$bJU=;aj*3Bp=+pYM!9V_(D8k)6WK z^2clvqK2u|)sKd;{qUz=d(_qGyxLOw4~Q1bzrGrA5KFBfjo-Ygf8t4%Twk8tZ8&oC z8C%%e_j})|&O&iZM+*c2z%0-m-%%+s@A|gT;50^iI}v>Rf_9@t?g6r4?-h`OQ)B7S zVM}GuN=8eJinR^!>7;%g=Bstz5ro3(%B``NkR~|Xf;16&33k($XK}x&`AwCzN~KGk z0E;w?W_YKTw%u6OEY&(sk{i)ZiFQ@Dk=fR(Zsen-yjE^FY&z52$qrvY!#dLH)r%NL zPETX0q>O9ZrIDg~i{f6?TG;8*S&_fvuoJnml*b<8XLNBfsC^$kNY2Wjg8t!kMH=9< zw3UfhNz5FluDK6gdTDS9zs)ICgs9>{e&50Q@_2D9T|5^9{r(7c$SWBp* zgMj(EDenQZS+CJ)c76Gia1aH5_)Hw@;(q5TW`(wpMSrU7uwwun1lIkt`+nSgX>QC1 zI*w@2ONMD$*YxdYd@D!8+plqajsF^-0u3(0hvc=*&i}N+gP7&_Sg&39EDk%w$Ta&SOT`^-u%u>F2iTR1uIW~_Uu*pX ztDC`C&E9<9pOb2uCAC#SBxSe!>koo05B`Z~PdCz?wEF`qdufDY-=RaGTz_2-aooa5 z$_2R-O5=!2u!CNdXLbX0+?s4l4LG#H1@ia}4~5!)0a^jIPaw_zdsSGbq0H+V3R5f*LON96YU)jw@?8s?wgE5LHK2JjhDOY}$-v`A=p>&FwMwcoC0S45d zp}lUC)nZL#uq@I5D*$gJf{l`5Jn&$%vJ6)E5PBbr?N_IY@|1+?0V3&A&W{ofO4WPh z?GLlwF}phl`oxWw9KHySZ-CGh_spq={D&d8z+4QxjNmqWb$K z1-wszhXM*mXVz0Ik7+|BedXeX8+P_R^>DO9K}&gE8>&xhV}BW7 z1CER&j(NUMoEzDp4{E7Dgu6PNom6e4lO^wK-|AeXnE!PC_hU=OM$PEL99Jc(UvqJG z)fyiMdNYfuGOk}YSjI5Q?snVSj<%!0Y6{x@)L%t3+u+69Dq;)1IDcqft0hkAG0so@ z3oSl&)`U&W5tlMp+I9uGZTT8D8xGW6nVq$~R4$(5f!?;U^Z>-Xmfsgg!Gj$qtN+-& ziTZt@`YPfn=)Ku_(=6UTEHWHvsmk;1!T8X5Brd3&kOs-V zLT<3s)w}Nj-Oa7fn19n>luS6M>cMW9?RrD>@l2 
zX=TFgeeaq(DBgoBV_Aoa<<$bj2wO$_8aCI*u` zS2=y2)(9e;YlI2z44$e=+i`9jn?Tc_3k@5@n;6yU*k0Mj67EcVF7jA9AQRFJg!`|( zrTRxcGO1I|CDHv=?pINa>ZP%O(rJhXytg&yit}I0-Y{wIW3v;L7Zr>u88yiqz>V5_ zgUsXRb-(>`{z+>`d8H6T6*#LFFjVEIvg+QPjRoftFl?h&N2gV?C5!NbyQafVMY67H za=Ot@^Aws!3~j`d@54`DdET>N+9A%(bb8}k?SqN;PEmyj5?8}AwdDo3F}pWRI;xE_-XeY>W;;DcjA0@R%^ z9niM>1{QXH61{)9rbZ6A>TxX*U$7d8`Sm6`w$6qk#5;YT#zqwBR-K3sziN2#@)DbV zL(S7bYmkXfdlT4_US~%!vNUr67Ukn-unc27WH1Hh4Uh?=J5OduE+>-V2+dFzV1kU10@2%Bbx$^TK z7R*WGrv(2&S_Xp9UND8@;*QFchcPFS*!}mAEV_Z*w#m>F)8~I7>_3Fw0R0FLDOYCT zL~r^I(g#9>8r!rbHbafeMVZ#pK~SYv!nW;943H1CN#AI*&iW()TcPmGW=;uJp-dLL z?9A1VlGL=BDt-N#%+!)Xgs3Vc{Cu3D{8?pO21nqNA5LdA-cFQeO3=1zS9MNMa%Q6~ zZMHbe0UTg_KF_-TsMYj#Vb8}Y^kfrCx_FH2%OZ+a9gmU+L&Y@Nxf{?plzQl?C~v(~ zC#eE~V>6jwl=Ro2J-lB{1@;C~;Hb}lj-^D}1WqAC(Z+Q54!Vm_g|;w*egc;m|T7j1Z|I*206_P&NeHCasOvQ z861w>U@M|CWGC zp;yHk=Kg$t(jp0{y1m;)+Ot#aZ)~%{oh3o7i}Bu=aMM(*F{nJsuWcUh)gbeZ-Tu$?k9iJ4`yTW04*}IiAumO1Clhe zf7^Y{k)Yb5W=4}=StX8r=;8~oOFevL+D7+$|A~aa$4dwGoUNs@{peHp2CRAMWh-_@ zwJ2fmC?Dd9VP?uZUu_2YO!?7udycAQVPbX0N*zGs@&E&oXg#S784|{8&r|{M)HZY# zR?b-`B3SGJU{O5qjxQA?MHqo3wCD=a5GZ#y0T#x5oMV}x-h!!+@3Gu;xWn*Je_rWr z2af70PGh372Xn~p(qV)<4uH|9#HFcVH$lP(0Sb*1jZ_$ix~Zi?fZ{{Sb4pJz*!i{gM*Q&Z=HqYC7~6(z#KI7Lz!Bd|M>HOib^S(1-XVQ zvVxF-`1(_x?F~#IQ+iONWx$e=0$Jk4|GXRi;(0J=_Qt{gG|FKj6)S6FD78|CyuO@PoIvn3HLt>{7%`)4zr`)EZr``==( zz)4ZvUz$tN4;fZJ@nX*!II=c2d(8DAJMD@nwQs$`J(km+3g%v$#r`Q`=@a_V z3jS>mcYgMt5)n+MA{B+~4*ZBMf`yHgA825wQH|S$61gC%sF+-e1uH5dl1#1yv)?XU z^Kv&~8auwr)J*9%47hes{SR{ll>Sl|p~ipK zyiTgMtDv0@eWvpBYm-$yFmBy`5^I3ePk0!ihJAK`vEP z^JaFPg`)AT%9qa@-ib}jl8G29M8XT?hH}G?Fq$oCBPmzI;&EXl%2JKw#mPDSE< z%%t4cqWoajP+gzP4&oK!jbvyF@hy4D6a3PoM(tmwXy?bd5wEtw5@4d4SaOMZT+3y0 zcr_;kUc587T`cp;>TpGVr>McyyLOZ^%G? zR~O0Ih^2`F!QE3sOiD??z&Z(m=hG08=1!u2zKK4bc&H+-0e3=EJDtb5J z_w6NiFfmZ=rt??@;}b|>to+Gvne`Ze8`L%xRyd)=SnXWI3bTuGs|QHzFjnc-dcp{5 z_?U7$#~&;1B=JsfK@noTeU#M@0-v|~b?1)%j-ac3SM*cz4*z6qjSYTt9P}7^HO)XH zcWcLVbDT7nvN3y9au)HEu4#@TgzrWv@&AvB7L#;W9RxPM#J4Uj;x*Zby8J9nGoE9N8>s>-7HtRD# zY=?IK8NTwPqjyXxE*?^A4owN-i5+Tu^26O(m5n{(qk}ChxC|$tw0qA-6Lv$CvSNi= zVi%xPtw2x@E47DNKHEPqIC63Yza1~7{pc4$83#4bibF}{R3NjGPdhSn)c?4rJ~a9L zIh7Z78t$ZUM1S_XL5BB~xu?})hj&%96|c73sj?h*oJBI7W$m|O3&EA`L*G>7X&s zfBLwcq(ea+j4=AflY4{SX7Q$Bg=ptaN!eQ!0TK_lvW2`1Mlb~Xqsf*+w33oPAxW%4 z4paU?Q*rHb&6c-E;gpPv{@QiFqRXcVMuS2xPSowIo`^dQkN$|?j!Mbdjdp8=$)aYX zKqX*8!llw?cg0c|mdvr^My#XW+Fn+hPL9T=cO4vs!!zH0929oCj^_S7ScACqp#s4~ zF}_2m!CKQMBZi-lsY-Ag333WaWJvEPLRgBFFvQ*1{jb(o4qkVVSiNi&%LHaBwSU#Y zrZVNcMud>|)M7a07?Dadia5yNnl4)$?MTIh-G(WL4m004Y2_y=SN## z4gd4E)&4iYk-8s3Tq)+TA)*1Qnd^7a^E!Y_R&VeaxLDUM$2Iz)FO~B}-ehwS*!nj? 
zKqSDGd3_0igc5Oj+vCHuV?>Lb@BOjw*Cxym4sAZ6V6cX4uZg!D^H3_2ZYks2O>)Ls zlN=J;c?bh!5`LZ6LZ>mav~SKu)^+BPUpCdacr2bD?=t=T2V``6boo!J(X7`eqGeAR zwHrjr%k5|Q82R(E4_2DQZ~J({4w+X~AsXA(#At&3zW_m*%QA(H(3?y1T;lL9Yy|q; zACH;rv>rv~IShiocu>*JC;xc)fwyg5vx0dNArGn23t~Bx=cRmZvM7!pg@w5m=O+KO zS%KoaM9)|5<;yv3yM2$hskmf4T`%pTVVK2QyD$jCs7aFIl`g2tAlE7$ z(wEk}uY^45B?BG_KJ%BTrq+dR*Ej5q#{YA-Zj)XbpEu}Ck0 zbINv`5~6gW+-&-tW=j@H@`GhPzTC}^A}T;0juIjS+jE~P)<)z@3Zh`oD|jkdUv5AkaiG-@7lFrVrz816$s%B0Y# z)-`(=GZESoXKHJ#_Qqfa*K^k~1M{@gF+1k55QLZirp6dQK*TO(5|NS_*W@;Z4#6i~ zK!oIh6R=d!Jj2Tbszg7;RUh31EBB@jUBA&}oeL;+~BW6v_(!?g-}X*;$o9xGpY@G zbm3CzAm4tE7|RSZwMtEEnlrXo_wE5Ae%SeenMKxRB$G_6UjBd!;YeLdbE9 zI?sw1xA+Spxx@3y@^v`{=G`UYsCT+ftFBS&iX=d;hBg#9Fd(e;Om8e_BV0))rTin+zT^}ya>$i%g?aN6T~fD82xx#jHpRPkVSZjODCD0 zZW-<_YjWQ^$h+ql4r_&kkT^@YN>#x%4t*_%Gp({t4Ui>CWKI&>w{aorey$}9cSK=n zU~SO)-*FE7a^>zGLS8iU7Zil+XgCpIF=tUx5>rl#hA-{aABm0R%p>0Gy_J_qO(~Dv z1H0KP4eN-ry*cz}iDtA-V_ukj;_5Z-{;J}xH-xc#)c5G8m1=H?RD!z`*<*b^4rRMl z6VLH8lwIdd622I?QF1!-Q7V>0zk6;)aTM!)Vl%r`KL01o2*~ww#Tj#}(M%}mZkUT7 z*ZJc0m;f#1<8 z?FAe@g>I*-OP24tpvr9xP2VCH0KAQ5nf5ED8qVbrBv-kAaJ{R3^&S@Y>VwhS=dB8g z;Q%F9f(9XEXxB*SusAhJuYfXt6jK+C1}Y)YLG;!`$sXRC8XkNmzeVbTGEC#)rNGa0 zzFBsP!$E@-d&}_>I6Ih(Y=DOOyoih=L=#H-mMs5=bahMUyOW8qJ!nI83>I3G6Z0FW z)#NhzjXQB&+olaTp(qSoD*tMTGXo)1LlmZO<5YYOvaP&}e@E*^-e9eT?hwMer-JV_ ziqCpTgrw~W@TaZN# zB>(V@In-!}qa1QQTM-bBq?MZPwAh9;3h&DKU zGVNxyM*>yWI(*s}!*(*SV~BXvE*6@Ssg0L{tO#!V+>vr)w2_xWV4=z#H)CO>YXcCF zQdimGBXKS9ydjc%FKWPag#$nKkgG=Mb=tQBv868dAPCGF1%MH94!G@i_?RQ!kcG*D z1mf25dE3+Sn$IX=MX@K2%X{5ZqQ>qzs~WnD5C&_L&!4qk+%gEql|OfQnnvn8y6Y`g z8-S=ly+p$<5v3m|(snf=TKP^riAU%*BO$Z$sRn~QjlnfMyk4@M?USnZ>1c5eeLVx? zVTYXS-ouJnhNf%f%oiQuQojW;y^N1i=V>Elj8iu04dR*yJfv3UcC#m@#U%H#BcEuP ztU90NzkcVBHJpW&-sZ*ZeW-Jnd~A?il74MGk3rSV`US^a9P467!(3>EJG)xO30M4NG<%hu zkn55DQbs9Zq`dDaFfm0NQQuT}pT>g8Y!?%&Pr~aiL%V~kG{WPcRBFO{XT$0^0WGEW zZJSThSd}Plu-?2m2wJRTlpmFibk0q=Napv*A9Y=ql#?Lui-b4t5KVpUHgn3ww_}bZ z9fKJr<>qn>{@8$99?ce|`ZV|_`C?)`I!cBC;wrV)BxZ@ZLGF6 z(Y_|$gWxs0hz=pht2cmx#_@fuF6XdbU^zxitLroW9zy{E+x2FEx&2R}YMwVVqpM5~ zA#=-La^Tu?a`s5DUojogXt2&W(bqAK8PT}snJ65}RlEjKsG>VW60l@C%bO%`U&7Oa zj^%o~oPKS&AH(^TLz~u6omZ$2u{vxE#S-MgqMTcvYIok9NU2uqt_}eY<^JjBi<2DM z4pe%NcPFGphNpd;n=3f%w{*1a1w-zNq({CVRiG_i!nJ&on{)4v!kwSPgw$OF8gPr< zc{w5$ORt*oNpKZU9?@LVbvExB@vkpM)LLZ5F zjf+8A+~Fv~dkjK z26p7LO4xid;n2SZAAHj}buNfda1%(Fdhdi*&wEs3#|Is|97%7S{9|&W>Dq`&P5@UlR5{d-mRq5djkz)ek!pqc99KUPJ^gHl}u5jqt*(kp58c-}Ys zd07!YA~u|jUilraHL`EXRa+KYrO@4la|xGR{gKD9np8FL}4>G1i~Lh;z*LWdW~ zAN}!`$o%oK02%)2lSR->TXmc3c&pKrnWWT!I8#EJ$xw{2=~0>S&TwDC`FebqN#8WZ zb~+7A0qjXg&=q?efd}ec^rSItI1&tM)arS&dB~x%J2IhFgvAE=(JAS zKrl_^@N8_`d@A|}r*0^5vjX&W4(-MTo5Q7`rKm7wE`^lW&>MRfFEE_mo1#;pTxji5 zgueYUNiY6SFYC5-j!ru4{e{$X>6zodT;p@^GdpJ@_(}_AV>*+u%#MuHj5eP^&i7m8 zlrdS}h}J7q0#h9;XVY{W_h&}+|mk} zpPRAbX&nu&`4nnID?=Gj54~}^a?fecPslMvEe`DB{-+Q8?t|=5 z$4?OzQ+~41Wq_aB1W$p&ej+bsbti}@ZVi1FH7iFK7`Us0+37qu zf5BIK_$6qRZ3Qy{1P`gdc{hzl$LehnF$2@@B^aFzIBEAS^XsK#=n`Z@q&yTsS}vg? 
zCXG)f7YY2RAtN8$vSsxE@nLSR-d*hkwur};b5gchqvi9TI?*T`PvK8rO`9&*cA9(J z_A)kdIZ%~Re9(mmqpm?S<-wQT91dAmonB*xY{N5*QA>^XGdahIp#BkH+xklvL&(L( zQ))Ik1Oq>HARVo^%+beX`vqbU5@PF}EU*{19U`n(`$p1#W;X0*`T+B5@{(kk0iI$S zCDzt}W`6Y)8i#cHJ1}9v#01W9Gnsy&7c?xtZ;rTuQeP2+h`@k2MIaTZGeyt5c=`at zi5fuFC9wJKAHvPuiNhM&v7z-ioyQ=}sY~5n?(F^IoQK+Xj?f}*TwhY4@`wuD9A&8)#&n{p>;LUTP{!yXtr=XOc!Wy^TxyhHE= zvl^xPK`yTCe_u{=`;gAyZWsc0vZJ1ruk4qR-zn>zyPC-&H_4H7PHA~_cyhq(ePzdG zZZG@e!#Y;;er158YFO&?pP!a-Op=; z-%u}?68bS$Sq_)~p7xLdc62+bB~3lvCkw>_$@RdAT7IHK+fqkSYbm;9kD2;7skcU6 zZTN2T9wKzL$*SWIdF$NKJdSbOO`t9`9)kjR?#y53$fLWPy!gQbi!76!2W{ADQZ=|V zpE|u~Z!6_UhRC^b`teg<+2@=VNsJqF#bL2Cql>RTHIJh)j?Z~>=citSCcn))A4bdk zeS(|EWUO5S=;y7e%4GJtZ*oFgP`~JIzxx%aT#&pw-?`#ti6=LxzWf$SvQ!~>LpB8(MC#xBVinDoYhtpb zxUCYLXX+!5V@_mH2f68yaxixa=8W)YZX!Mm9QJy}&Yvy(q=fC_<{#W3r1uUjRiFG` zb+IkUHK$zlVUpnouY)(mpT-rc_fkIJ60wrA?3PHG+nZF0J>4U;Fe!)sDw>zs>-#3B zWQF@g11yE^I}Y1&AJTcfB=WgK(q&u!}3~;ga5%S+SPoVXs-3>4QY@L;5IgR;d1Noc$xioxVPW5_rH_) z7`QG^i1H_On-ZSEo|(GYZZwt#{+B)LZU=?dFUcGatsZFpJP67>@OZPBu4nN7&}#p7 zz>?yZAObY$4uD}z;ru^Don=&1aU1Ok0RidmQo6fAq`Nz%ySr6Hx`s~a?rxDB29U0y zOS(b&9^Uu8_pbTCS}Z;cv*w)hfA+KYZwo8>)Ow^Nmvr&i&w*k{`0S!$(?@8SY@v?% ztjEhODzHnVimS9LZ}fGg`gyFp-eQ$9If|Q8h|@&{8`eulDhdgCpHv#D5t}Nvj&kk1 z_PoHa!z8scRCy1Qu|fB2B=aYuibTGBoxy1#`8C`~vk*u^ZjAnif@_KI2gU-P**XBC zuK49WgMvvE@;2uu6+YM7XqVq$!nrPg??BYr>P)xIwoI?gg0i^VrEk{PvLvoZWYMp& z*cX;TUA`vU>A}GCO<|D=nglzQNcjcS(NM#|&@4iCH=?Ng=5}ErkZvuu9j}{26*FsZ z3>=2j_@{lO=%3LOAr)P3Fv+4O5nFWtArEc%67H5BDRn2F0Vjll(g`L^O=aYv3|N+K zCP*4t?w&ZjF>~y;pRc|Ea>>PSPRjOsvxIbHzXoX)P5+gy_dg~*u5Q}ayF)lLiFV7? z7T2?4Sk9-6PS6-~6&RKmqbsEr*q~8*!st z5(uG2HD|@IWcjaXWAP3Cbqk=Ka@u~#inhch?`K>ArXSR*(&s^Np{LowBv8eQ^w6D@ zK}Y++fF06>do7=H`i}+8{Wx8kGWF+5@iD_x2P-Fw5c>{7M!<(Pfa)G{RLoq`R#r1JmY4mJoV(is627w=Imu3yt(smMmNJ?E zC8%&~IzCAd^9majnUo}|5z0p+6v-vw+Xe5I)=%w@u>iJr{qbxeoeq;6U>Dfvu&54r zNDGpEqU)7QKTxI8)BrmK760&I8$WM>05&yd&Zk&w@NI)!676did7l${&gG_@10${m z;l1maHt&Zk(z#*ne!4NJ`i6oT*JP0Q@1$|neYXM#N>7QM)a~^-1B3r+DczGN@SFas zf1QrYyk?!hxikU7qlC7H&_0nZ(fS}E5ij8HKL8d zP$}VRiV~&FVuGFodb%pXY3(|XGsmyEJ)F-9>0{3}(03zG#;x=eB+D5f_HrO`2XxI(r4-8gjA{qVk_AM+Y5{%!~6Cp?%Lfp zL$+OY@oG}w+)Z_wTlF?mqD<&T?1A}Oy5rST7YRCeFxPu{qiYCQ*firYCsDpUw<@9J zW|*KbqIz}AQ-FM$Tn#pk66>GC9T}B7WB0NJ2TNntC)$gfxi>4M?q4cWtA83+AIFnL zEKkRgzL!>Xh$hw6c8Szv&SOEQJ&_L%LS;{g+3ozWwVk&anz_y-FYwxFCB%R1K5#2G zBX!I`l3J=7@Q-G7EZB#MCVEW(E zoT{+bjTx>xP+EQMm(&C>{`7qb4quD~0IV(WEoGqx`g}*}so11gry8tPs@_8$U8ajc zDV|&z)}`l3Hv;v(Bdm@Qt!K8r7N;gdEYNh}w4QvA5kow_!o`oVQ~nEQ)94JwO?eoFD6m>*LsSanajpVDvpbzJyB0d^H$g z(tx=pgbP0Y^cnnE_o2X@>3$6;XyX2N03ZrXQtVMfFz>Rc4&d2=0tJ&X>>sGL)awG< zA)yw_o#whDpR|8m^MMh#f6Ve#b4i?B<2?TQq!5ewME7JH5UV+c8;KxM8L^3g(sxhZ zYbNfGDJ}i~HRo*j=n`EGt1M6W;)a}tv0rYKdjh7R{sJ%W9MGtHVMEY>4wrcnA*|t5 ztMrfP>*%QQM1860T-J8Aa)=GGT!N@Fjr2f`fwvAO~EQi)Z%h?H^W&*VWK^o$3ae6MzA%eGmJ)>-rtx z2;%GFE6at{w9`x&WjBSP3$Y+1w1bSsV^E(}+izt}s9Pl3Jz5E(HCJs^QDwT-J)?Uy z&^H4#Csq#CMcX}<=*aks`3s2Wf@7UgE(v&j00|(k=Vz<$ZDI@g>fzO<|EbJQ(^BIS zzj<7M>)3?b`9FYvztUm31%7$|Q&nQvy>sOd{ckZt3ivyMP@zXQwVO?`#XMiC4^L=3us!ARe()fKWSX{~2m|6N9ZO>zpxX^VFJB!d zT#3w5(5_(e1mwM7wt0v%ozbcteG=>Qhk?LTspusGgvvA@-L`n43_tJq=kf2Xk+xdp zn#hjKT8sP#Y|g*<_i_ERnbjgqnfixR?s@ESfXS~d3_b+mZ;Ps*xr38!QdP<8m3QN^ z4m7`A1z*-;MrRS`k!_Rd=*kdEAVwZ-0W2Bbk7cnub~At6uy-Se3D3pjjpRpy1Mh2Q zT5CJArrK*em#uQ3-CYfa7vqbZ&y{Uu2XZixqx9J_*sVH6;(?2l!LmK0_oD{6(aE zur(Anzbd7wD_#7(^9b0jB;8-b8+=8dfr^`5jZ_Bg$;Q2#QwVuS%&Uoy(jW^-*gv(! 
zs<$;y$nx15jG1ONyggnnnZE==EKcRywoO7zkFr{-g0L@#KYI&CJg&s^E3-S3SP5s> zMS62yYmR19l(Pj#9`7!j916=56DVH*UXhr^>1x+A|4sjYj_ts~-JP@-Mq1zBCVh0a zxSn*__jMIJE(W}Kq9<0($i0k7wvMaRb6mP4!>@T$rne#ly}I zt}#TJEq;t5V4JJPp@v;5Fxi45R>Ck$g|k>_(?G^^3xSFIID_0vc%nAeVRr9?uF3RS ziu#XKCg>JRNrO9WQIjVx(FIpA=xo+;Whk3%9hO;!I`6!Fs@KmL*svm?hQCTr6L2nf zK;e8j%Tam&%yvSKlnnhhgLlvhU;*hX+3NsO(-13GAl5Fv0r%i4Xj}w{wni z)aoqjLKDJ>XMg3CMszu$%My2yy9r3&S4#FSy`iq*!hp;sNE(K>yNsx5z}_6cbaYYV zdmPN=nPvL2)N(+H(;cWO9Y@Tf<8?$M;^(PCre(?FKYTH>k%P$$wkQrhtMtD;IUZqg zPxVE;>o=CUEosalpl=zq=1Ed7AxKc;=}GuaqRzD5t1`&F>ru&)))OW#b4v#%kOMdA z{tUyWJ5Qt^>g#4jZ(`-m*%7zm?`{1;@Z|{Zw?%O*Z_MizcjX@9EPNsUw#ec~(-6>e zRs4}vdQxUh^`-l6mxRThCOh*?zgO^!0JVIhZnGb-y>Bey*u)YO+HN+v(P#b?rx~A< zq(5AGuy(*m9QM{bP$@gDMk*sRu8i)^nVFhQw3NkpATIgeW}V$G6-s0HbxT0K=|{Y_ z@-_R9<*>N~_oqe+brl)6+q0cVvlRt!+dL`8A{9(uk&m}WM}kLv9koXVtW#6UG+w4{ zFZPf;_8V&X{KxSZyM&(ZQhRFO<(x&YxYe;5wf`l3&3-V8y;R<0i|_9@tOjNMqJ1cU z4;quDpkqbEla3>=+N*;!_&UF%x|RI-57-B(|KqQ>_tOkj67ux&x?m*yPi<4mqZQ zjOrdhLf;-1vl^e$JCj<=DlcI15fMa;{)ha|d^KlG#OFuE(-E~H9E|G0pP!$aLvL&h z_@Sgez82r!1C8XdhCnQqxG{XrMLE6_7rdLxvDoD6CmbM%()*rbJvDa`Stm_oFd`ojRQzQpG8{%w##Xlt|5FDfZQE z(#~JZyTW`KF7a2bxq=sg4h3PuZMP{x1sv>4^bBekVyq z>|XxF#=k}JX}qN^*Estq@IPLjURy)e%`bnRlqD6O97LFrn33F&kf3Wm?&DZRx#E|I zxE5-HUO3={+<=RSM*Df?4zdAaRgB17XQfp4ffQH9ots#&NU>c3V47IfVX@wYwas`6)GOpjvYoRZ$W zV2*J`!0oEjwEf{fTcMrWr-d@yDU#F`HzlcXOPGsES4DoNH>JquMvP^((xSN}tUgas zaLE+2vm-zKG(YM08MHs<4oytJ+@9jU2%k#by8PywZxvD@aA$C+ zm~Y+hME^6o}X@(x~eV7*)XI_zE1_kw~flIf62oZe=`^JhxOOg7{FZmS;wBBex z<;fagbM7n^03QJG5xnQ`P1A0$QtDb9;G@CB8d5lMj(dRibD0gclW)=YihKf`sx92k zt5CzGEjcjkv`XJ>K=*!#O5L+XX~5mZQnvoneHmY7pHVjh7hPPBXpb>U@&wy)j_@ag z6$4STs8`3WK@LaeLUE8c={_SecxLtK{1nWkZuoi2yh%%uErg(`)H*j%n@J|UQNZ)v z9+!~Ug0&nQ?HtluUG_~123^*Vf-z3-ky>8e1B z?Bm;Y?V?B4p=jLzWHpZ&G5PdUn33|6;dLW_xt+|EiP~pP>Z=&AyGWST7EB)H%mfu z+-xeBbH)3$m6wvN$(G10Uq_yoN9wFu_G2YBm0eDqekgO`HolzT`f%rvjw~tybb31z zUGCNSx=&WZKpP1O`iHStC(97xGQmeSr55%6D+BWBnY2+lmuU>QLqJPApI$Q^YtnqS zE`y$L7w#!gBoM@+o|m`cG!q@Q8T@NC8D8dQRO42ZfzLx^&E&z7q9(Qc+*&@)sS7Xw zBtELElKbxm>0J&HbI40R4a)m1kxQHzf*H@%J*w)9y-vDk--Lq5Y%?!tNl`0X0)0YXFj|T%w#! 
zYB|feZLL+JOePchMEYUI6=;mzFGbsopSA)`px|x?0L$!HI7_L@Xj4!1q<6vTfC~on z%4ig_+t|ZEO<|pJ;rZ!`OFyZWnQ1o4rMg&5*5!Zeg^ve}x(S8OBY|8N+knA}w5W64 zF!<@(&(LjR9$;w*h1bnIIlKl`WEjT&{Z3&sIT7<7YS?S55&L!l0Tmg@=ry7JgCs9= z$>%I1!YwA6YHR-h{iCU|(W9M7t%mXn*d7X}g*mSD1a!h_9?f$83>l;V{Bq}Qg zwkEln`B=TGI5+b>ZgOp@%XP>vJkRV5QB5P0+|`<_Zl`MpmO!J+NKWNwPV$Ud&aXv1 zKI$|^$`R7Y?e8qLR>b(8_Hu)-IzYwotnc`~&W?ST0fI$dtJ>w7BWfqt)HlZ4Ue62n z%icpbp%5SgX)lkZJsO#O`&ML-{)56xD*7GA6+kNH2TFG>BRPg4$iGJ(AiHIT+Xv1M zo)zQn*o*`FmrEju>|hiWHM#5_Wck13*Zrw+LxaYk(n8{>NSmT$b=J%M_=i%If!rX8@gFM?=?j@ zy_m&j+{xed{-*ms&!t|up-^DiZ>vzPJ2G&AUtIwNecOAR5hn|K&^W^ zxqRDm+`N3{$ehQK`b$IrlwM~**Z7j30e=-W%GP6OAaSnmXrV0q@|J*uu8$0{+LQ`` za3;kE@2yf=3t$}A56=t+qH|LzJCN%LFNjY6-j}1ZY8o)VR5bk`he%pBPT=>#0FfjRK2e%<~y_b}Jhr?<6ulE_zp0nr=3(7)T z^$@YQ-fI{rqbE4w=%C|cF~*hN9Z#|wC~TwtI#T{ihRichPy@Q?@f3=By*Ae3&Gbhc zB?{M@%)%hoa-#`DEsfn~HmVvU9%JLIX?5+gt}A=K_u%ZclN4+ol+k_0I5yFR@@=P% zLeGkNhKo9awkY_+;04;X)n;A&RnPVr5;f_$m$L>)=brcCrOd_inBef> zh%bF_4yas9*UnJaSoHKZsnW+eimw>ALSI#1RPEF5JCpPSV{U}n5|8IQ(Wo#hmf0E; zF_Grl)FRMORa2LZpJ_Ae>kQJC0S|7CXzqgqv0U=M4%cwNZuL~sC$bqW1K!G&n1Z5) z&xP3_KOA0|q=DmGZ>rQgR-48CK!Cp1XfWMq7Zvpl@Dv^RypA84M6F=%3E`^7^tSNK z9RLusjB$n>&Q1^VOS_?0__iccQ%_p564)LO}f8_}FVS1mIX zLsG)`44M^3`%|bmEv*rx9S16^92)^>^dA5)&<`7dD;@A_)ZsCt#-(LwTc*Q>CPsmJ zl_;dT!D?{id56!9^?1-FL-*Ocn_x7GMcO8ew2Tc#7G=pia2t zPtV_>d$bU87GM@-MpCvyPpg}S;HXlI%Y9z$^vz%aY+ZHh-~`{qOj$kvp%(lH$Zj>c zAyQrf$$ZTTBv27K5wAlU%qGXqKb(am!!cN1Ts|i#y;g-bTB&f@#R%V80M1#Pbt7Ba zKM2~pa}mCINIHlI+g)Kky#1WZ;(~p??WxrVN-S`Fgza_`=xOmF-*nftp7)yT%dZ3- z`tpk#DveNfroAb;@)+i$=&O2r&f5MrANPdKRSyX>>6*B((mJYslMbj}b&K44%SLsZ z1s*jqyd}O^y)3>po^AfIBl-}aP#Ru{sfPn ziNI>P;3M|sDe80NbPYI<)MHV;yUeXdsvfzP+f1<3)_7us2*GfI($HRsN#J#Igcabs zQTq)$R|ss(Q-RZgklld=(`R5xa{25M;)&o!Vd*)sMhW6Ut4j&#`w2OE=B?3hk{?ae zJ-j+yGXV{3)H3)H4#05LnC3_l4P!6rO3(2D7>Q$14L$4`PPb@G!u%?}Ri6TOm4b00 zuOm!cH3=&4D-cH8!O9p)Tiu{2_%Bg8(m;djw?M#qHS`nyENTV1$%I1lp~GKFYluG-vl}RrUqJ;^J~e?YTih6=QB(0dkdMG17V5RaRL}9hDpWK zb$tC%1-fHw1Tc?Dfziq)SfV+I$~|Pj7TAt8eV}Dl(~BfWKXZr4{@K_Usp3u&$d{9N*A#+<$h&aL}EE#FLth`4Dy0~2(o^^(i-4|!#{sg`0J{*AL{ z`ojq~k$?;nPA>t;$!mL_a>lIiG-D3{w$UH%A7q$Fvvvfd?s36G!>$EFVsx>hH3f>R z44NG#^EfLTrSe5+Rq!vs(cN>s=I)Yha&yGJy37~)Y8;+Xa|O0I*ISD;g_3V83~5JS z^#_{)TxMNW=XdatlC5UE3$P~%&%iWz%{Q@4Fdno$AsnuPIi7#)_&d^DHp$s%Byh?>n&~QbQi@|xmq|*jo(}tw$ zH@+I8dnep5Qf=W)pLgdT(|RUb>PUV2;y*8k&JJz&gb5YNA9AW!x_v~0cHAu4Ul&(7 zSziyW-iZ}=Wo9B`CVt@p1E)4A1IcH=CgG7HJoL&mXVP=c zZ6it9ioTZGt#{C6;DZVV0Xyt#E=e^MyUF~p)lPE2A>NJ14WR{|Yr$1DcSwX-s_ea= z!FkSVI_Akp@f4YBuMlG~C7{mRLyYpIe3r@b^2>3($C4YvTy-AE*-!%gSl}S)@GvOR zqrXLu?5kY5BIY&R)q=iWVE^ZKD(^K5q=1LMul2#BRAqk}3;P#h9DmdLM}I~?L#G*9 z1P&^=^HbVmuTG;DHyFK|pTIs)5#Wnivp&fT{)8mnp5@`e10R)TMPkc*(&_E30M|-&6Y7CZnXR2IjwHgtfNkdhY0OE;C>aZH{p zt+Ce{`W0>oRVL@xzhBh28#tMPq%pMV3|zYHkZ%MG>XPgnDlcZ!mqeGke?4q}x1>XgkZqX`c=5}XP;L^t?Ll(do&$YCg2jp18i`J2T<;g(R) zGbe!l>kQEIvx!!NlYBOTV9hhMc7%)spv?6Gjri?Eib_;0tN@ zIxY{TvJJ%8$AFMQ9zW7)zChjI?1m%$83>$8%Z+gETiJ)+x()bVaPFMiW)^zgMylUl zfa|Sx(S6Ul!?){4S*P=aVZ3?#l2aZ(T!QoYw4>dFlPI); zzx#}z;TyM)_&W|_dXqY*J!D*+*pE+-PLr*H%nzKaN4~}cf$ra#UM7DG9<`+4)xsA7 zM=m}Jd9ka>^Dt2E5aXxz^sK!1pD$??H@`r=wmAp_MnM#N{Fd2|Tj~yzhZRMQZezhTQ~z6#k*ry)3rh%jupzu2*P;9N(YasqYe$dejLX#qxi(`2*P=0rX1aF8$M= zn{sd;kYAVFIq-Ig@w8=#)Qe4GqhAdtk!Il5)G~bA@~i2#{EI?-Vy9-jij}j=hq3|i zJ7Rz~SpHi<=)9rTYZ6|M(~FaxAieVB=PJ(#?ZL~ zCx#VN##^9LBwyoN0}<`mdZ&$2C2=t}C86J*nj|iGM*GJaa37Mri$hF?OQ%;dZ!{q!t3;=I)8*a&sXjkDk@{) zeD9_ifuHqr2R&XxO?v_j+B$Q1WC#ZrVmOvs^JPXI%>^I^b%=TT6;rCIV9rvbi|mk( z&$z%IKgz=PI{@{YI|M-4$v!wtZ}eY_E;rss3st)v9su1=R9b#GL)j`obRip@%!CwU zKP7prb}XaO0*w4FfVuon*0 z1du}b!Py#&s_hO%DJ!HRxvygv&+#27 
zr)|8~^Z2Sug6EPSgKU43KO$8?Bi4AkG3c2)I730yc{^*arSfHI+jBJK(lcO+R8#C? zD`2ZmAzKtxUr;0?x4foD^Nxe!!%EuZwWn@L><9H7>kAR0xZxMPUe0n~^?KK!r|6D= zKa^vB#w{1ZWT(3?g8W~bvvaKsZyTxs({y%hkml|8i~Zs#CgQEQ;GMX!>6lYiCc5zs z54BSr80rAPO86{&l(stSkbNTuW$ZObBKS1NA2^kG>=?CCv##hghVc3&FgAy&>~5@*XCcU#Q!F!@~;d=^VzFy z6n#%%XG7F_vp?bjs1~w2B-^kFZ;9P56PL*6#Vi8U?Po8kdtWtH;>t(rN!DqM+RpDq zavIWw%R4)4^TiIQA_r4|aEZJ77!g$@%WU3`Tp^^3+@Fm-cfHhTyVXO+931>D>FmvF zeloJA#9UEiZ?d(~!LavRQ+s*JnZy#i$y z06s(`-dLtnyWD)TGs^__sE`$8cZH7CtQ88-2jl0sEdxp@Op%f1{ec8tA|e{`-=I$c z%sZQNm{Cc($OG6(t~DhjiJ`%hOFb^BziuZSVHx4a>eX0~>cikq&coe2VWM z^|_QpEvqpXw?+83TB*a?l2wL?F0r&nn6IjAek82}m*Wap0RD`ne2N_oMEbmzN|PRA z128q~30vKX^rAehWE|Hw127T#8MEt<-4FZ9NRDtdQ#PDRmXmXbJ7vWTZaD*0DzeLI zAh~NhU#9!%>+lxP1vtOgYP4hUJ=uvby?YBC)nlCR#~pRxfD(j)!5TQmKASSbaPn55 znFZ@s3(6VO?f}_workj%8Ze1{G!%$iErCh#*-k~5jABQqBABpgbibJ$Z+U%+;!$Fz z`K*UQ{+s4))Z8Ob=~>frsN1&tF!qQZ)Rv26+v*j*ubB99-es-Gu3ZS2Sy%z^kCK@q zp*=1ntWMPYf~IX``dhla_wz2)9|-ay52&{;?LvMqiQp0Q${%s6g209xFs1>!M!9%@)C+c zi@`o?=U^YLRXg4!2en1f~+EP?I349?wZ3viqxz)aK zy`=!`k6=M}L(w*kHgKSN8(2w|gF+qPf}+cY$(ty&xueSE>Q!AomD@cperlV|&NOTm z*>*zzSH(eVFmlwn&SY*9%|0>lONo{}0M3QMj+n3SiK$$NL(-73urc&lHN3jgml3X2N8bn7?X(?^FP z)dxJkCo3K!O<5RKn^rXD02awF>AR-GxG<1_C7v-4@psrwlZwG={iZFk`&kWkRwq1mK3M*+bxPRU!pKf`QMw1?H=Pkf_hC}E7EnQj6v8t7z{JJ*=q$I>9X1+Z6_EGUI(oc=cs8{R;5 zkPRk99!C<=1W6Ihut@OR8CCJzSGt;K)M<8oaW-RC_kgg9-nZE`ri;Bycj`aYv!bc? zMapc{hb6~}X$2g_E;`-fQgTN?%+4?|SFJ$Wmd_4?=A!io07vXpnJN8y8&ACPFyEW% zi$7zM^!EoJR<$%%n8O4>#C(FbcUB0)*P`*F>7tR(NH+s@QYx>Do6uEX(lFMEPoE!7 z=kGY;7MdM1Dc&m*mE;Ds*A8-^V9*99Aw9+U@?<_^=J{TjZ>|36enAIM!bc>QggCd< zO2$a2=UIrR=Wr`)3n?fyGLtC<)8gE=2~D#ocqyc}OBG2}e4eV;LS3l_l$}1O?Y4A_ z3HL1c&W8Q5A@|P2X1G@wl@1pvUW&a(ylq0Y*14?BIkw+CHowwS0oXODL%0;P#APf> zz^IYkVWeSoalY!sva>&34ei^6PmFn7iGPCNW5HQwOx!TU-p!bo*cN3F14=IMIf9SR zKV4!KcUL=3lH7_^5<0?H3YO)s6s);H@+=Cd;I~eZT278UWuLmKMHeF@u#)(+Q({)$ z!xLxGb$Fd77S$-txe;3484_pDs(!tZB`NVaZ-qM~0@o;i{v3Gcl+mT~ng{Z`VWm@1 zwxOa)Qaqu(iEx*MwFDGUKQei-PbbOb=K%?s#BRBnwKgJiC&p^%rvi-ANUW&3QsVx9 zqAYu`WLS7$LRMY#8b}wt6^$7$9V59*n%Opyj-UQrxuw6j)T`&or(&Uak`Bw}Y{c5n zwG3+!MZNnec}%XM`t5UIqxY#X&cOdvSY{;75m*{)u!UPD#Er6@KaW<}HnE(gf&HOD z_{Y8IAE2awF6~fjIlSPIY|ycos!3oAM6cGTwghUAb6lvUTE5SK3l#vaZC96H@i>8R z2oh0X3P!b)qH1SCZ|FmQh}F34ko*v9?bph*jIFQ7o4m03x)d^PB<@2l=Ae=9@s6vpw}W6oAO`q%yc^gof~v}W^8$h=JE*oE;cz(g>t4@lD7WYNel(N- zC4TQ{xrNQxjVJett#praAE9EZem=_Jya?cRJFBr8*rt2R1tDcx?0s_6`5$2!F<>aqgT{BUuoPKF!JGtK0`6IEXpxY1EPPb<&FTW9ejqzkH7D zM7CxbPROlq-aCuonFoK)E)Y+7w(q+=T|1@!lPebeI7^$>y=$N>0L~BVwt}3C1}EL7 zw`g6otq2~ur$%*DQ-uo(6=hf?DV%Iu&pFq*5mbT44QR*LXqT{TIRk2kvMBfpo`|*) zTY@IXKN*NUj9|A84c)U%YO@kn2-$@nkNXmEl+p`e@tEC^T_-&?3iwH7e^`JWS_|>q!L?MHNkddD8CXl zJ@I1?cbsBpnk{W2BOqOevxUcav&BiSxc=$jJD(UfsO=TY?1U&SvS|3;+Kc2#v#GmE^? 
z65?ngtt0IwL{HIRePP}mQjVueuKjHMvCDMgnqQsmp0wH&JvOC7jj~l^u;sWe6Cx2> zw4&n!ic^ZNjdWJD>PHT{u`$!M%J4LoJ2j$s#Bq;b8t5ot9%F(%Cx@Ykx1iGf-C$zh z(PXEeg%Y0mlqXRMnk4stb-D=!i>*IsA~j9>+sg2yfaiV#&!gTq2742h$M-%obAeqQ zV+Lzjpnq6TV~#8$c`81rI*XO~E(}a7m(tiSp`up;|AaEM(tgXT5p&z-zVd47%`gC* z&hQG`$HB*5`G+^um1l|+(4k~J z6SouG*PyM@rS2nwyRnVW;%s;`&c6;a zKu+@CsE&7EhGFjprVhlz%j2?^)UcT>fyzf7-U!G(4^W2~7bDif@sYB~VODpg7Dl5nW zAbLnF@!{v0wpo#Ll>&l@NhfSd2g}EmgN0oa>Xe?7>+2zt8_tSaYjW=8aQZ~GdwWua z`Y#N@)DXYv;(_}>4jVCYBMmsOEnf1sxp@QAXEbxzN1xl}v9tt{ZC2%|zZ;;-Dw{0; zo26Y(7y`tC0svfQ>q|9#QcC?8zegv^so5vBJKiN|?w`lDGNa@Ey+fTm5*;uI4uf7O z7AB-Ihq;0u7gf?gJ`dT3Ji-AFU-(iE@wa-P7lEh|HGM(n!~P@EL(c>%g+dnJWGek< zq@O;}EM>I)^K;ZRi840t`!QO?X{IB-F#`d$-wzRwP1L$ zaNu~1;n9zpc!nYKYvcrEBS2_N-AJYg6;@q>>MLh`Cu#byt+fDCFTw;x_*K zwz^?AoJHJ1vmY-?SO}cVvLB1+WePnMe1GmE>$$tyn54jfJ|3H+P}0>_bJsFX?ABti zEO+-b#gA=7+* zh2P-NfF?5RirVplO=ZB|pQGCGaHVrAy2YGsDH`zRJkCk4(u31;d5Ydn6Cs#Y)#X&l z4m6aNQFs1V*0_GLFB^w#-B6%HJkYK=&q;$tJAK^_n*K`*TVoHi%Ut}jt~MLFmo>qB&UHa|;bx8=gb0OTJTu=sY~tqS!yd6~_J6beeg$ zuY4tG;;V+|_sTkst^eV@$$K2E*)aSr8m?gXp2kkdNAn4%E#eVBti`~xU$ht)?JFT% zmox*1+2D%fsHUiTKJ}4B*_0gq_V{r37hqmfBHF`$S_<$pHiX!>fI09S^g|oixX8nK z(dTXAs7(lFb2E?DycqI3`nE*YTIWDD^a5o&M3~k`J%kK+yIiIhC`Zogq!;h`yJ#7aU5|RXpv^l|s4}N1q|4fuNlJ<>W^> z19|+JLrieZ<6ojYY_^J|W$g61_XDYQvVeKxEYTX*yQ5cS>)gSpV&2gnwd1)|%rVFj zaWCz2A;R(T&odV5jl|Iv2M#k9W^lS0)U zC?Q#v0+K}91-^$oTCfn#+=S?!(D?BaVkV_OE>eZ<5en`zi}Yyf;L z!O!XSl`XmlHDUjsm-0**X$uChX0Wxo+exg{-_HH+6onSe)M{AEx&B{J!fN>G5)j@d zYSLK~JY5njthpu)l6Qj%-JlXWd^F$34ksJMsDwohCl}WhKB7`D(fe)AInnpvbBmgC zi)Qa0$J!*nYFsOJjCEr6i(kn`2_-iLyJvQH*}2dZZ}tS_zVCUq{?z-Ao9zV{R+D-t zwV0p$=6b2b{&-b`-W~J3pzNIrzX$T^^$})z zW+5@5=LWRO726NC@-(?JiKfJ9c66NmfcwK)ZoKae$jfz@vgzx$MRNxNw)R~@ClrNr zjr?TZDGrZ5KobQ*`PJbWqJ_FQ$-;z8f_6CQ{&6KjU+GW6HeI|b;1~?R4%5Fl0nf~ zsCGFfvAqKmE!!z^9kKOu1inbOoYCu@#SUjxjLRP%v%%y4PPTUqN@K24tu-^n=lMp8 zPk%7j%DN{L(|n-q_>Z}7(Lh`QG97a8Y!gakmx;uLSvR!zgD!hR_N%T;80D?kutS^+ zp+&JyuSw#?7l?pF_M*bz!x$rc@JRt@ISUjiy%9n-Km-PyQ*1_&VGX@Xp6Sqn7j5jO zKdR&_-t=4#nKL@Y=gee*0(8yMk!$1-?MS~gp0 zZnDKZ^I69p$n;@@xlVPu?vbu-!ST6ps2%C;>4~=GVb=w#f=L;g z;L#_rja0NKWWHHH_+Wk?8Cnm(?w<{y_w|BzZMT2B!fOo8<}B5!{7xK3{wTK%qO3~~ z=m-Gp7-*$7y@BhIx7SC)K_^`$v9hiMr)S4kfvwiHLp}5R^+ieYckh`hFW}duAe#*| zX&Vt(*CfD8ACC4{=Xp9zFZtFVR@20oe8NhCje2jB35(5s?Bw*Be0|!*3WN)~^K%O= zt|p=#2UmKik0pcM!h>MmU53N*X7w(6wf`zTzpSgWT0_TgPl}Vs{0F%eI47Uans-6} zhPO#VG^0y1fx;&S5xctZj}Y)qd3>lKIP1;P>V`?yOMJPGf6kSl6WU@|fu>Qa92IvV z!OJ9WUTuruK4r~P2D^OrW|_A*9IjIv2UNer_k0rtWSy0{6a&A3e2+e34}&U%+y9lj zKC2Zrc^D_RZDiKIV}WTVnnBRrsiJ*8WeWmptXM{&pLyYiyu7wK6aG-y?#(!zsckzk zK!$LNv09hn_2c!cf0S7?U8*?VROGVavUl15TN;EEUy*Igfg&HNipO*VdRB`f`a8W| zcn&f%VQ%z+u!dg0B4R3oglr5%9gP>sB2ppISiS2oaoT^vW z0G*$ia1@`KP1rvbq}Hc`zwIDa0t&6Q)%8ynda>+jqLVGARx2FDmE4@Pj&Jj=L`9k$ zTOn7=>7&;~1@)rkO!e~wNTN+co&d_3A?0pK_B-Dk+g2r4>G7gNdN+urPVYLjy+Xq& zvRJ2aERNlDK*7l?MDXI1+d$mUN9c+AAv2@!1jY6k|;on>JPkk$!^OTkySI#XJ#ZAGUYL4yLxx|2gerEQM21ome}$B*gN>b zz_8S!)sd{S97{Q~DQSNsFt~4+%IaK&bf4du%tD=o$@B4%sObo= z6RMPh7w?40OL5L9-zpCO{lm|R^^ALB`f5@RLBuhKu^kk>R^9KzIN0DC7MP8cMB z<(Mc18Xi&S&_Mx#VAYN~g~3|wH}@|)6{Y$!2A!lEy`dIEytq>rZ>RL}2wgzgj!qqE ztD?35vwK(&rPQDvDJVsya2SjE8eVL4mKhuyh{4gbAG z?gJEWe)#?}IzZ=xaMH3_b0tSi>RPp6!vCIjuQ{s7EI$YbyvAu=r?KVy^mDPg!G%6_ zo)z(RW&W^q$O=1TEmhWR@lw!F$?Sf0BsD~*KX>qvF5F9QC&Wn44-$NOG&uko0}gKW$*^Cp$!2Z z3HUDFhL{~wyXS#P55kC{L7V8{Mwo%gOYkxZa|$zE0AedVwnBnB8SBxLXQ*R=+>PdNb=z{9XW8#Kl-8HC&n0$5?hDxZ$B75&7?vsJb>veQbY%uhuv+Y`ym7+4*f|n ztFod=O_hM^YMsr|+nYUnyxtF(+SY^y?-Mhol>SgsR-Km4T|8e&sN|yBdYDM2`vCsX zm^zdfB=^1~HcDdkt9A6)to<*=3C$p-EhZdiyI#KBNkhhhuqM2%9z{8Z#OWwFGmcR$9O*p9Vc0FMkTbqDER-F>-DmG9(}bK+9S`Dtg2 
zoQ-Z#9nF;I(=NOh(>pxhBnVxx!9|B)Tk-P~qM@v;L0&#@cN!Gkvcy3f?I5t{*mLrK zae74lrb13KO)9g>ZM~(I^GYX`N|T(Y{pCoD$B`TDdb5sg8_o?M!A;wHu4tuQRnFgD z%>|054FsJ8Mf(=dbaerAGUVvBy<}84Pnb+>++l6%k=D$7zoZrMTaIF8it||Uk(rT@ zO*g$DSmEU5{#!T6xw5pP0G@5$eK5tm2sSp0E^WW3{{(FsdN3CCUXdM_BGU-EfybIYz9Y zy{Mi5>YeTGPOOr@<-6YIHH}V=YaL%a&05Pek51>nHunBId1=-ozl3aE(o-%kwOso{ zNNYbYD&IoCfxD*(w0iy)B4k_oC*9BG*bCH?!-5I5V0`ZK85|5L-TS9>;f&pa;L5Fj zfgMqEo*CtyfMY88r&jQzJ@d|#&#F3+VS29Ki%MHsCVhX-Nv`3ncANd?fS{){tp^m(qR#dDuIQ(kKxU7{I08jo9(LU$QCk#BI`JVY#BP8`GQjj)XltlJv;Kq>D%(FQ;kTA6 zpUf&Q*+YR*rXXHa^Rjcim9;VBxJeg&78PQRr{3ee5rCw$!weBl(0lRa9#Iku0lfv= zka5@r4IOFRnvln$O=~xuBH1hZqmUnQkjP^L^`_>xcM_Ajs}~h3%KIb7w<6_)v~>gS-}7tiHmA5SCepqFf_BSN zpBjr{-%Fox)s-7g1U*DeEX>IKtqD$BYgrA~dQ6v&Z3tr1Qf&Ni^fz$w5_;H5s3+M^ z`MMBz@BBy4`!z@6>5dTTyao0_(Ii)=(POLYQ>*FL0Kbp|H)w>KxZ#ne7UzltXWoE1 zG4_YzA$f%AjunH^a>H#+oc$~yh{1XG7ieqr)*RSxp6f5s-!%H7y$1FW-AwaRQ5`e* zgTn~>4rA>GDmXn1OSrF$(xDVJrc=ys;97F zby;xh9AgB-)m^;~4Z^*W?F|cB=HYuQJe0rpY>PLdYbAkyi)c!dS@0wM)!4U{?QcOt z)vr^}yNUZA&@5dBtUP|e3B(9}Ejbb-Pwp$z=}y<z7c6r(^J~+580%DKY zcK+>WJ27J)ag{gRYiU=&Z{2(!`?q*bBFF2XnN!+vu|ABNZqArc{hI6 z3tD^m)z6^@^XKFs2^ zQO-YBS(VkV-BjKDMmNe%5X~a>Hi`nMqs#6+AD%&#OK*bdGDX6Ah{o>FhSWe#s-!i^ zVxlHE?0eZ@6uSSOIabPhR8B_-E)|fVpw~o0fMWK}{5y#us}k#acPlZVCa4pY4xyE}CV)J-AE>-V9N*V4B?*oS zO`$>VVMVV%gE~l;!U9_`C?xyfKW5^#=|2{ktjqO!Cp!P~HkH|)Am}YwW!bHdH)PP- zhGb&D4TE^59qgz5}!h7lytr_|+2mt7L)+tNRE4aQjv^BxPdlyrrIbtRA-d+}*{wtgKQN;HKQnxa zK5F&Wo;+g59XKfLn54wR#)cL;MRp7Qe2U|?u=A65apgjz{_ODkVLrPhmBjEO8gJ>d zI*%Q6jZ`j7B;Fs3Fp_QkrQm>f>}>YrfX}u^HUdUwkVOy)YDACFo{3ox7PKH=f_7QS zJY^eWGI=wBj$%H#y9S)-;M+Mlq+EeJLph3-MjP_7%^np3%a^dNyfo+9p^O|JOcryj zXY91a0vfF>EY;z-B*m3r)a=*ZrR7x*3c<;!8#WAT1y&>)91n5K_g3v5O9)_Gv98$3 zDY*&$p#Kgo5~q$rHp-~d)>;KO9?|X2$}7`0wdMqmR;*-?NB#dqrNdV}QgFrCVq`R6 z#H*hEuFxCxJ4}>Gl$ZxgQWL`OR$(T(W3LLJqRX}JTwS$^-;g1r4Bc+QE37-}m;5C^ zYdR8GQ-rPm{`7rhP4psPXoO#tH8(<{^N6`PGIxN|PlZdx>?#@@CDF4$ug-i&VlhTE z)Y*qBWa|S(_|z+bB^85qUP>AlT#o;KXl2+kZP2N+VRwt42~_DniYyF~8-4cgX0tma zbeS2lh4D-4&z$KA3z7;tvBmv|+-6bSDr~|qqVcI$4cC{KguR?&)D`3P!Jfbt zX&)8__O$W2i5B{KOyO4Nr(1fVIA5aku4neqQQePih!;QL*UT0+>ass5?l-P5U#B!dGnhx)Uqart;51S=*rsj&XwpEh97#*DeXi#4()nCb zh#k>RvqgK#ucDO~%6-t|7&jJit zh~hKWMzM6@gU?=67fECUv#Adr^bQhjGhZ4OUT~g21~+c7jM^Z>h{|B5e%399RKc52 zFmj{qX^T;4#*PK`#|9ZC6bX4)?)_$(H0Q7tdwO(Ud25o!cw!nhPbi_>tn@U~J&AGSPU)u9Iu%y~vKHX=S z1T+>1Ki`29+>n@BI{`jIF0tFV)y+!7sgcsT1FrJ=1Z|}32Sl@btt99sv!dS*HR;3dfL?9i zr?Yp_Wrt?AgX}X_5U-Ych`mi^D9Y88EhS{?JT<~tJotIni19x#Th2qFLys=#$+-$Q z;;u-maI>S}`ku(>Fr~0@q5&;}hB|%KS9!pQ;~>=a7ICb`*{#jx#1t705q6VmF&^88 zu-6qss+_p7=G0nOPvxI^A;#^*y9UAmw7FeC3bDB`$O8dSJ`w;aj2LE`CSy(|@-<4L z9yx2eaKEHobIyA57<$A@l|1FB!#=?h^=#uJ26i6vO3qIDM0_-ew*H6ex_x@J`#2xd z*n%;x%;umhaMEFzi2s`~7waJ%-NuR=h=YmORI+zMGQn)3xa@!tWCw-^-H5L&U&erc z09(l|GdG?{2lG^p`#&zjZwt&d_6|S^@lowBLfN{>{1G4`W&H{dx61-am&>w*4ma@T zMSvv4i3ZujbE2>k1b7YlRU~!Brk{NFiSDkR3;L8YmN;gdgZHV2*pydneF<92_ylU9 z#8uJNo!~ng&)f-}HJY$uiaB|-d}P2IYnD3cwakJXaC#|@0ocTfzEdLG{FA*d*HUg9 z%m=$^=%u+IVgQN-bmK|`etc-w0ohB$4GWnw&Y<8QXp47!?O6H@vxx zNzb5|_b}SyZ@Jn{&Tw(A(VM~{5!4n%zzss88u1R~X2-cY7r2at3 zwd^k*abX8}SsP~|cW^sPMg>&tTYYjixs-MNNu^3B@%=7i? 
z@sYZ_x{tqdGCk>lXi5O?&bjq7Hq<5ibx8Z@5f92!Wl3I{jOOWC+kMycV#}K;b3YFD zvn!)ER;|$%24im=!jKRk?An1T{l$_*$1u9S?kT$D$zz z8IU-8R<^vQOoxt5(dWkavhK%FiTbf`KMp`GPLLak!k%RNS;oPerW?Nx=mX1o5$Nis zbr}-goEM&>Wy6blBJi1dEd{8f7sj*C%T{k1yFf=+e3%XS= z!5}m!xoaOxMm+|LA4W3_@8nbkUer)tp(>RzMF1=_Z~>?iZ3=%pGvmlp5PtP%^D(*7}4aAnO|ksP@aFQg<5JVKkztp~#+Z_FgUhO3DK<@4!`j~B}# z($=fJH-e5*v6}TX1eYUXec4S`7)Jgy0T+b2sKQ+(Y-G=(cdw*&EB%i{E<<6PgQf65 zjS##pfk&^!;95x!rWM*?#5Q9UF!=70)!Yeom1)#$AKJF#_OIrAJqe9cTKtaPNw_|0 zyDSMVD0_W|lubyzX@Fl;q~w%(3&XCv@$Y{QH=BnTn=wNG8b8#12hv>BGN3-nQ_NT0 zG28k~r@s~sQ2JNSSEHl>1xdz3^ zZL@CrjV5kCY$qx-TvHnlSgM`^ei9b~0^t(v#C0O)f?+Q%usSCF2|jO-`E`$H3;bC4 zfPca~!sMYQ>c(X0Bz;k-GURnYt3vifDK6!!up_2i7P!!3Q(6TCGIHH`b~!GsZP)Hy zJFi6&-h+LJl!o%2Z-Ju9YZidlr(KjYFE*Z0dw$*xZV z_*B)_u^^f;-D^aq#Bb);DGz9S*QYYbg4CWPa5Jy8y#ZXJki@>E3StOL`D?rVE?bu> z%MJPc-0I*H0a#31ZrJHGF^9w* zMSu~x_xb;e^3!Kpd~GF4CRj*gcy(d~f?4m9P!xG{;Zvl0cO^#(OHNCyOePmw>gCM> z2RBYL7zGDCOyadTZep9T&Rd;_ANr}%v@K`82SWVJ|4ZFp_-fn=WcEEg;(>EiTewbOVYI{n;CDb8usuJ{*bY|pFfp#OMktZ(047xcUK_5>0{F_>h+Lk!vVkj{1 zUBQ|XmIZ!QZs}ZiwgWhp9Lg0YZ{b@MzVYvEo_6Ma)6DdpJTpbJcocE=*`LkRIHyeqh z%kWJN5b;1+zJ|g@SFmJ0;didWVw>M}KfBR+sm^tw_s~v-5F$M{ZYFum%6SDxr&Q7c zl8#&m>|xJr$!a6n7g9_}H|%A2U2adF^(0o3=0JsNp6T`nmf>nQk}gSIz*z98w7mUxIqUP+)gU%UE7_$9*$R zH#19*x$IpV90EfCu% zv^f8-P4cEa?=N@~owH!I6jshU?DoXzL(bu6&UY~2Bd8N#B z&-ciA*bdtj4k7G4Gj2E(!l8OVwUhG0&E!mCS?HJvRM}OM(N3!qVe2NmdGowh8z1Sq z;8z4oUSO3)&@#&V-_+R3iY(uX$v9@iq%@Q`>>cV>e6@1E!3msR%BFvpA$zaZ!alV&tZ|11Xr!S&aU!dbJJ>$9 z0_OO~u%(|o5=j4J`MsvT^QTgjyvg!<1vp?r*JlpbOEB`$14q&lo|mR$N~GUwyOZ(`1WI zCE139tRcYzfZiM2G0(M9&<4=W=ye^TU#EbSg6WkbkdlPrk@s!wUz)!rUWgC@?-0Et zfBoG)VBF~UC)}%);!pSPJXgB?o8`~Rlf3EMT*@lj!8=MDio9ODL6F+frpV;BeGF>W z&Rz$i;5;nuycn(`@`%Z!{;@w6u2Muz8M$iaf>Wj56UXxw1@IK)zq$b+5BJBLJ*w{9~f-d7es9Q1lt(T!(q(-peQKsnm;wkJz^)PsdzFANkm&ZPBjeKqBb zIYGnZ@U<(zrJEiM5C?9(b|b3i7i2~73sVqe_QVN21H#S0NE*Wf!0nQ(RFMb3!;svN zlC0ZZ`*vZo74O-J%L9Z*yz_7WoH)_b2suiN?4AsK zEc+4=>;zspM>uaY-A&KJ4%q@IBvpo_-Yl$z^bTDNdI}C!yAU7yv04Y)J^kBI?m~6! zXXdC!U?dc$!4Pm74tK!l)TfDyQ%4-fXbjOm=KK}dW{UY6SA~p7ohoAWd*nFjG zSgG_?gTDPb=wt5 zk?pg~EOp+VR$0IPJ9iDaKGhqg#yZcqitSYu#a-QuOn;}Wc*-pM3C&fVo~! 
z3VE=YPx0-qxJT3dwG<-box@zEJ5f9g#sF)FoEun&P4$8%M2(qfOxLOG{!?&oJ_SH6 znS<~7>yCU0VDbY?H4B_l>I02<)SQ*yUVXWB%fRa6l~VYLf%5q zwvvmnzv{lsxOO7UZ#w8!9yDEyvI`7}KTUMF7kYb&TK^pIbZEfO+}8xNQQ~mZl|Br3 zVR`vw?*4SM1G&sMndD}^M?egvwSZE9{Be$f8{f!l^BvaC2Y1oz$?*WE^wP1k`)U3+ z9nKrMv@w${Y9ffiT>s^^qHmUt+mNkpV?_#Gn1%5@QuhMVb{X#B>};}o|4bb_#KexD zcbk;9t~$7S*bxc}$_#)g#AA}Oh3+*#ZhXK$DJ%c#?dZ5$1kl2KCj*r&>~a=GU1cG) zlOzwj@=LpPO9)jm7x#q%@623wPg$1SLd9I1)Ve7h%amcdN9DFnLE+V;B4TI@+^ys6 zkz`KUCxp0zzpqFbTv;gTFYR8FYdskneNXlDN#~54k!9$@p`V@QU}Gj^WhisVZ{(ec z)bO;9L#lWcQH5E=B?O2ret{#_mrS;-BxW^4;Roi7-WBGS!1N($)K)pB#{%M_njz zF|W56S%vtF%~e%@!)`Lx%_arG*$&lh;B;Xt>AVJ4eUxl68J~R{Vtkl zwzOK4({cWkWAhC_hdcUC&IzLZO_(&A9rsYlpG%`)uE^@!m`6duBprCu73g!n?(Hw{ zn+3i{r_XbC2rn6Pr6!`Ryx*4bA6U3x9t-*>N*xu+4;1Zhc0%{cBt8bm9IIV_Z~?ma z^2<(^cv~K6D7|jeA7T2H`0Opd3-qlXm`GDqsO-7iNhY(+Ks58R#a3{H(E@#6=Ek?ILn9}I^ytkhXE7CDeaDNe587)% z!;MMnGPImlh<5HN$s(%d7K3DO7uS!^#p4TFUCl!60+QEW*N4~8+;bwv6^pj_q#DLV z2oaJ&g094Bu&pIFJ*srADLyZBitxZ)$6^wfH~ZSpmt3&As((RuceQ8SCBIfi6@-}D z`qi_JXNW8hJuy0GcI=&9l{pvZM$ASm%+j`hl+HSrl~c(LM89S%>?SdH0LVKG@H?n{ z7k@sM;l8YW;8!n=p}x1O{d@fJtJ<**?WQ#szjX2NiP@))4zJ23!N?%nj&kuw@j>eu z4u8tDFV?i3R)#+6pPIB$9ldNS-QBqu4Rm`olW4R(5z!zhyccLsl{RwEWW@Kg>^7{n zFF9eqILq~xPiBt|T%lp{u2wSyyS)?-k)0p@DLI_oHb(R^tO8w})l9tK+uh$B{9Vrk7Yxvg%J5emFY@!r|Ya81GQkg*96c0)ZBu1TM(TmJ+C5YWkz5hP7K?% z3=IKF`Fy0qcF`xKvn+WW%l!i*c5(7vCDBzWp_BD~p-pQv4aZNfzvW1Qow&C2ee3;K zwKJA>DaahY`8!yvIlTJ)q|QsW$Y9>ERTp)5J{!UJUR^D+<&cCa3T2{wjxR6}f zQF*$e!oENq;=?UnhZvi@;p6h^$R7vdq!#HbGamLM_j*U}oyB`ZU2!kVeH>7B`1Z^J z&?e%xBAL*F&N^pa zDX{iUGV=)pGiv9_>oJ7mo+C19FE%I4ZtP1Qz1iX25sX&uX=S3nIz|>|$)(MO8wh8v z=S>?;{VO-i(o-hWr%=o>K}i9j5ODghA}#LkO7GQNMz=h6$Y;8F`ueOvM0ncDr!R>d%Ime6Xuh~sN1uVz03rJVIM{L1>f&;%=|V;y8}T~a?r&%+OL z(8}64x$Qa>Zi`BAyRMKsxB{U*Gq!GC1q{_b=Xhw1w5KuOr-t5bSE~~!o+^^& z!y+~Gon73n&Mfz+@(#86w2<_#XjX5eitA||d5b`OQ>TL0=yAOJrUv)G**CPslDc_) zJ!>oGo9loE0)_68-%>9Z?Kw3qEU6v#eIEU_VhrH*yf>1Ae*T+@r6-v$g&5s2=SlQI zK@Qg(bd~zUoeu)59K>5B;7xVv$-5^tP&48}nGQ*Um+!AHSl%y z%xcnr-p9|~x*Afc5g)U8mOzH0+$!c6FAzaL=M8iH?y#BU*gzKHal&)bb;7)?ag+SL zmOSCyCbM7wMce>gUh#zzlaR^~%}XEid5_-~6IcSNhI}Dbhyacu3$3I!eI1GW$~IfL z)^sXNjHzWsaI!TqLiSJmE1Y{dLF+Kw@zoE8QQ z$y9rcP4Y2`n!EnPxoOYSUYe*bbEi4P4sQZAuN@#E3@2Cej2V&rV|rsSda01e=DT!F zM+wf0&0}PI&T}Y6@0dxstxRc;eDOic5lMp4=R*03Amly59Ti@isS%ZhF0Rv2FJe8p zZG_;842}&HFFGl?`DDiBX`mUBA5YD-XWYOozZEs+oVoOAwqpypqq;g9R%L_Q$T^B6 z$G!F5hq_VBi{PD;(U_(A~G=P7|f&z$cD>6?oJm z)FZw6lz6ka(xD|fI^08*<0Y$IS0Kej9%@)x?Td4L7&D!ssDhFQ3x({;-fqH>n5AQ- zNey?vhgidG4?4FVWAB|<4Jdcd9UST2HxH?t&*?U|900ZsH!0Xi9|ZVu?qL_)IbM`= zq0H}fMQqW88?(sMP1t#kav@L5SiG4c-zcfA=-DwUP7*{UM<2(eFY6QBJ6gG{zZGI7Thmp|9nD`N{qE^dm=KduyVq;+QOMOB^aJx%XW#d7kwjAE zemcARjXL7?gk8(O4m#2tiXyfS8dn7sYc4B3B+0-Z#>YDfpYG>sijQJk$SqUTd=*q1 zRU9?A?c_A(x=Rfgjn4;WDc)BkA!TXbl!~JDD zT#H9>*(Ku3-u$0p@5=#9g}3df%-p>H+KF5F#n;z3a9w~sKeYw+!|*MZ8MAt$f(sqs!xe%JL?J0{t>qTN^G#nDW&6T<|I zq*=?H_Ai%YK0i5@hkV=mcc1Y*%*U%-JV&boRh5KQR^zT>#TbiH*`4lu`d7+xPs^7r zEPOGLoK!S?JMt^yvsH5G>ObF3(nmh{_V3ON3`0mO{n$JSF5!G}U8{M*;%eRRk6Knc z2v=T{`K(WkYa1iO@;#`*J<@@Ey}c_6ua>z{Q{@Y<0 zNjf{yyxrX0Le%rwy8=$E{Jn5@#VaEsY;*llE%I>;8=&hO-^B9d5xXjLbiRV&ZufL` zsKuNIac1;n;FYRO_zrraPRlDVLj)|^eRI)nXq5OJs|CkAWI-^Ncjko-JYTl@k z$gBk35x=BuyDv5ybg}Pxpg7pF1*x_et@jTRn-94lcWzQks|3jTlNq}mMf_&akn-Dj z-A^PvTy$#Cd{m*;s8BhNQgFYb+OJs2;c-lvNgU{qZkAx!Guc(IAfN0EHtF)aC&HOa z%QxcI76A31^a37h*VDuWv*t<12inLj6L7udoA%n-%wgZPDr?hOJ;$DegWRTjLvY4^ z*u_0tiv-f}_Wq4}G@DGG%V~uq1k@ya9Q; z0iHQOU+hr1%Mp6c-LR}`);I?aX$e~W9ZkFlIFQ9D_9SsA1T``v9HeWT87Ib<1Do4R zT-SAR5PD+3Cz>2U8W)_gPhdz5xcEB3g~+ZwDxMyE$VB8$lpSmGld4%|l}GYvrHkwf 
zI7hG_kUaDL`~lk{u}6A$UG6dP?I&6WIDHtccg?29c-9ZU4p9dZd36-v4A5EaeunY_ z%~|&;^Wu3d*Nu~@Sj%Qk)Gh$-MDR}w_Z15HT;0OfXIe+p4bBsdY6}@EW+$oBBi){H zDZ~FvvNpSIlUPVEashn%;!lJ>7=7qK=*kIgW__mvJOfs#RCb}hHqMZcHAc$X7O+Gp zra`&%h(6^*D1PQcSKiRA0s;4%x1^Pti*sg0Mjr<2&ofd^>uQz@r}-CB{@Vgb;A?pTOlNDfAWW`I$J z=1$mH+s#48(%bS}6V$JbHb>L5USCae?vUaEc0^=FR$YbOs|ufak5TEITelCS1mxFx zXNIJ8l>P^%Kv}<~-vPj6VARE@dggjW z|54wiE(X}x#`wef9sLWQjPtysJ^=dBz8Mo4r$yw2H89f1I7FKG2GFM<>YI&J00>#5 z;yVFQjrJoTAn)P9S_Y){0I#@wSeF9q2w)B!0^bS%g#rSGz&ZZ_!qK1Ea4zs8ecP-Q zhPr^Kj9zF?LZ2{8$f3OxZ`?+RMu0oU1;z)3V{eOfWC@D}x}tR{Ssb_(WX9zk7 z=}t{GkLu$Y-}~XApc5a%eA~CmsIo zdv3wLOSoTPWzU3MJH{=5_Rdzy{hz1-YVdd&&AvGlaD`6etL~2>7W`U4$nJ zMHCn8&TFKd0TiUZ5u6dAi^NSoM!`V8Zj?wHeV*OUC|psHX+fY-UZfS!0*Z^cPsVxp z>6vS|6Hizxu>SN@#-5or#`LnW$R4%)#!dY2VAp4{E8WK*p>*}eueUv~C`J(MyLazn z*1rC0yO7s%FTGe%Ru%>Bp**7B!zZpE0Jx;Fq+%t&H-T}Na?~{;`KHj`QDCC%KJ=3PZsTyaHMFaq$OMa)aoXNw90S$t|eqYY!B zw6plvihF*0hvfs5;t=y&J31&VcA&t`+A&|Wd}#Nyl}3SbMfqp>W08bB0Fq@^G@2B=GR&Lc($O;xj(RloRg88{)YSI(iN~@U+V)4uCb=u`1>aZ zi+sy&wUyiL>TBFIn_FYS?`~U2ed59k;+Q-AIvvYc$YwVv)b3S$E&} z%_4BrUeNQfKwo6xoW!Kxt~Lp5$4$&PzHSH0!a+Cr!78Clzx16}3&0i<0N+1PdD5)g z^>8aLu$G&0OAmL`eK)xMcOUJZc=2)f$m10YeCTQb>>x8TF9P6!6(X!0O_(qtlovpo zZMNOUjlJ^$_vQUJa}&3HD)7B__kX)j{yXa%gHDlo7vZqreBOEI+1@_yj-ONk?oD{% z6L;}i{|rDRI}SI10?|{Vk0E}*1%LzrHJ}T`5*C)F0CJEf%9Zq@|HLofSioz5F#zNN zN&;j9*nxZic)%(e`e1-Q05|}6q7OaI=1s^p%>PI?dL02=khicp#~c{F333?UWm(h( z6w=v5-bfGD=g14!fU5uwqI1P1KUj}~)aIx|>JyzYX=m;W2v49$`U#*i@<2JD3m|{! z0^mVeqc^7>C@a3_=)C!!lMaBKfMZrRpctSRz_7Fr^iu%h0Pmom0}P0c6at({nX=%B z6|GdB0HlYkM_Q;q(xv>!g%98ntO%m>rd$NF;ay71?Ew%2LO=t5e|JO*73wcF6khzfd2q20l)z;6@RC(N(Q)6 z@{H0R^WYg46)AVkNqI*6^of+xu<`r&9bh2VFKCxoz(j9HKdVJ{bZMv)Kt;44ESRxY zKtAF=6~_aAP8&fVN?T*YG+|hCVXT437doz(=)35PnFq)DhBs|ICY}NMMh=9yR~X8h z^^zji_z6QhL1vDL_Db8JEn*pszLBMSW(?$qfB0k(8=KMNX{{*BPx_3pi?M*bkT=SN zaf5ZDMu@Q&9`F*u3*7-$EII8#z&&);jIG2+xiZEuhSwtW1OWB;CIFJg*aP1JfOdeD z8Dl6n^(o3JGsW_+Q&#qFB!DeHIqSPu~4=ep~z z3!rrRIRJ0bFk5G&%xEhB6InZ~M}R3=_eB4PJPbgTZ!!HnkJPTnf7%c8!j#X;5-3Zc zEP=8Fij@H0AI4d}H+;9dYV%OB)Rn&GmJ-nXrCxd~TU6@#D&LnS&_N}@E+`ZYjez!u znn^UEJqAc7(+CV2ECD+pm?F5a8x&%~%!FGMPqOta&t;OIg7)x>!U7?H9gIwv*>%Mr zjRFf7g$YMLptwTNVDiso8A3QhU}0w@iWCSX0%_KwBny`vO(>8kx0pz;b_vDLb)8jK zD2@=wC6vi;qb`C6=~12=4PVbcLOcQzJGwi9Xk+}E5qPky7l<}s0yH$ughs5xK=AEu zN7<${&}7inyLWSUr7yU*$pIuD(?6bQN2yM9{cKV3;14I7f_{N;B=7nCmUPEYG4u1L zsE1H!vBQ?dTNdeeGeyx6f1R}(IC!#uJxpkAl*k9*Kup7PEh_pE!oS)a{e?b6e=bT0 z=%KwvD)uv5Cxc!@{tTm0mTS2JUoc+9JA zUSF$~U*;MpnYXr`+JJ`DXPV(vD;~XL%mM6#Qk+F1%@-7}TBVZ?HFRQ zP78J@y_rWa&xBa4VPR88XN8AZ|7>hN+%`Xaf9iAtG87!ATjx=*yYq;{!*7pU|K|3& zUcFNwtj=+3&-^CsFsad39K93bR77EBa7LYGob{W`yZlwBG0$3PE-eUgu zvRP=ETZwV4qrP2?oz$7ER1_%_<~)G+YAyWa@loHUZfNf;nzQ&KD$;M{w>>WXi?~Ex z6|dqe-8sJmwz*(UlT+>3wjSNwTMvKWp1Z;}SR&R{M9Dcn1M_MMt^Ai6x=5&pYj(QV zee(P?w@&{d?xSD5<|Z#;mY3gc&3J2F0ouE9|FLe#)%w_mL9^Dp?1E+uc9k}z^TJ== zbUh6`*3zTJ-F)C9ZrXco0TyjM=iC7Fg8&l({(JlGinSfaJXyZa2|C;3^xHL?hbAAy zeBynEvaSIRZ>DJqJ(Qwr5A9labP5Q4{I%{nUjPvL#M92n|1FusW z|Ni&@6a!d;z83u|zygl51&{@xj-&Sgd;y?Cp5;0G&}YVkAAnDQJpeH=Z)Ki`)jzBm zkq3Y-SX;xTz0hu?v*22?F)}f6lMeuE`lb>e*XSq#s&oZ04;$Gn?x)Vl4*)LoRBUho{Kzq@0FMCe0SH4Uz4FQ{ z2S2nAbi1KmTX{0K2Z%!X(gsKeX%M&szl1@bj@7Ibv`1M2*1Fsmw1RT03N`w%xeLE zQN{p`vg%blu!u)`(G}9JSf9b50M_zE@jQ_<&<9!1pv`FWAV8w@C{)7JSB4E67Wz7E zmv%}#)B&4VWMx%xF)vmhA%CQuF^+Hm!#GMBLkG18>yWhhm}qk^oBRWxl(6nb{sAk} zM)HV04=|4Pg|sf{_sFV@L9`wEZcze!MP5lG8`}Wa(r)rLmE)KAVsIz@G>ZVb(GHa! 
z(v6G_u#UVk=2E}NiHu3`7LhJ~jO(;T zH)9(_I*1G15#v8~4Ila)?T&o&qkaJcl2-aF;UGYKkWQoG+roD#wPK0BgdZSi)`LhN zpg+b|0LH|bMdXjZ0I(5oG;58d6+IvILb}j-F$QRjg7RVvpzioqU1oq)+Bcv=;$iH@ zdMAB~I%S;%z%T2J`~czsx`q$mQ~2Z&V?OI0^fib&q}|q{xISz7E8mqRP?kVh0%Zv_ zUILm6FmKV^rExB0h_VEVl|TvHD^^aWuMR2!4RFn}W0xC30260~egp$e>anuNq?ko? z0BcOP*+B)NJOLn6YZqX>G%=|s{Y0W z`tH7myUjp-YnqI1`|jMM);^md+P4&m&4A$4<^aATsG~fPKwKm}D1>QO>?*BCiVHy> zQ11X6J7R@fz3_UEqE;Z{_ViPfRwxuubdoj!78FmO`^P_C;##`zXiA8|2CBOySRb7@ z@uQ&R0klV+7Bj1kEHEn$js(8%zIzQc+~2)2?kjn$!*i5!C}J7Q=%*+!?z-!)u+tF_ z^%Mm)na_nUDLZzW=P7%NWQlS_!H(jP{sN(t<2i~zlyE4Y;_qYpohcdz_08^k+I9>u zARfkV-l0@0sr@B@QjqTqb%r95dgU9>yreUT1p(@jcE~l0V#LiH0tGtYWr*F-Si(SI z+tGbhV<_>U7)L<~0H%>Ko3JIduLM{aKnctO1#N)ECXO0I;mi(ih`v6|7EExdAFMs| zy=Z>|x7F2ixg5}=|(2cy}O1HAj_39C2w^*x11HK6?qBDPIe#qP%dB8Z%aTpis zgzo|M!m(6*Z{}xW9Bp*&#)1y@Pk)KO(?!aW#WC73KwFM#OTz<1U{RjF+S!CPB)$Fo5D{JW+EMqIjfune~5UMo~1oHEgXF#b{Z+?7IwRbQ1CpF;_(vdz-L zY963HXrs+Hbp1xW?>;*|vLa)m{5BU>z+%g7vZUMSuvM!m#j4t!r#ulXvH|8>boqsB z^I(g6^rBaq0`0;0)q6g0Ph9niTl?!Pg(H05zWc=xUpmHB6EVMDFflM%c&XlQE$d@_ zj#|(?_vbghXaUpDxIZ`c46!(Av3s?gg72P5hzK|i{ zPdpGuIMQyIM?$1qKrr;GfV9x}VtH3b(((-h?7DqblGe?1rWnLCX3`> z?*Lzst{8Arbi5ER(ng!ZdfR0NmeKP#eOv=%!=fE?uQYuMLmNhaiA5|xgw&1RDb4Vr z+}Oy<`U2pk7yyVr06M&*Y=cEhdzSLSpPrE}z&gk}^+0=sONlfg>(frCbAY25IiP=` z7p2eS5%1}Ptfc?|BQF4oM5GZS4)UXQ7sexw`J}G_+@c<`i1iJ|3EC+jMnF#h@8}bh zw*X3vV~hjDL7dcq;--C4E;?2l>5wZ>PFzYGA1;LO0NIS_RZTKgDF+cg| z86Ye4zgZUfh=a8g+BOy#ixOipz%3D=Wy%+RS?RzZeFu=?0F&z|-#mf`dO!Ma-ia{i z-f2UOt@sxq=P(vQ0N&6~^PX{?z61d{MdoG98)mu&b)U`ePn3jzfx*GSnEJ7p8n(mOxnoWeJoe(D@}G9bxCkRK_r0mw-(GYLXQW zGve}BmO+QK0TX(>P2!v8Wnc$yi{l}@?fCKr9IEoN-7emgLgC(oaBqWX^{ELo6LIywN#&*a2l=&0Dluus_(cr!P=7!uFYDHf8c=! z!nG~9dJ`zawFx2K1cf%?il((0eu-3on&%f^XWejbN3#V#v<2u7 z(`Wj%4?DaX&LqHD*VcZ%@zBE|T<_(UZ}S}9$4>ezJX>_F!NJeo|Ge98J$SJH$DMcj z;lqdf$HtEJEA6^#cz5yi86mz4rcDp`tBo4fhM&r~mJ0**yITIzcd9qrZ5PsD0&7EY zo8s1+a=_c-xi^8fW?02#yYReOy7UG_t?55aEB6Kxv=fh~g|r%IqT%o{PgAIQQ@Dn7 zoAPWnUP`aRoN@Z<-p|;}etY_F9X!zdxA=!1de9FYIx-5oh=1<6H$(ae_tsk<`xjn# z+s2(r*$f`Mg`YI(xxfQHH(b9-2y4fVhcKpa53T`^yeR}i88KFy0w;=N?w<>!Wvena z>q#M>26pn4g?^@Z@un;f-x}IYGe1+XSIdy_1_rF&6Tg98yn$B2w~hCFCrp{yP+X=E z^0r74>d=&3)%24OQ_6Z<cOF3eI@EX;#}o5t z@3k$IcvGBvQ>y#)d#O1Uaa8A4(cHgY+(XYkCxjE9zC+fpmQTH~@q1(ivuM&LY>~2y zd35$QV}@Bp@RTQU@hwoCe0OXyu6nQMbBb%$d3^jG%}W<^QQM$Z%|})pFh)u~w8f(E zy%WDI|9VzS7Lp(FNnNwj;SV_A0FV4=Rv1G1Y!S&D_#u4v+aqVBan-tKZhNWqyHy{*{;?}o??+y?PNj|5I&w$lr{vRIS#EOj z%Kpp$yP7vkZJ{m>*#01YtG9X9Ox>*d)h~oN?O51$^J*skIXMhvf6PZC{qnuWyc72)aY^YZi)vZffW4ckw;@wfI9{-p23$ z@Rojo6)R&qe$Xcxpe^t;OGwo@AnzdD&^D%2`u^ds4)>=Vb(%L|hPMql-mI}z(}PTC zzzlDIoS+L~u4BL{Z=3q6GKT?9f`4?a=n>&h`q7cD`0!9TJT`YZ#O1)YFd@e6Z>JMOr{8*nQ4w+$@b zfHoeP6QX<}1BH0}A(QfbM{j|?&j6Z%H)&$-&%B4W1EHgaY(q?VP8lo*KHjrT8#kh)J z@yMzt+KK_QLw<>0erd}F*z*RGt$wTYPA=tT!0u`oJ!g%QyxZwI)$g+6HZWm`i@qcO zsms`GAj#k_{-<#~ewJQqtV(&7_sz}4%W^Brt!x8j8;IKgdT_pFtjEO9%j>e86lnwL zH#_gXBE9FmwdX}5EB%y>vR)g|6i@vwrE#7uIw&9mfP}BROyzytd;X>~~ zFz230x9!9U3JVljTKqu(1$e{)sO=646LSJ8F59hIz2+H;6)m756rc=1!3NnbqBefT zqa$6j9_Ve?(rodd5MsfXxKw8->ly)ODO}9Mc6|9}1=PHJ$MFC%L17mIIrG95eQvv} z+P#n&1NUCDMfAnj8xj_Y4|%!L9=aJQaP|H656=#N;2}TZ)mK9sxWa&Yhrjc#|Kle4h*>R?rV?p}R|2!7>Uj6&u`%z!n)nEViJK_1r zi!brJo^`fA(fr_VfD{w0@l@lr_K;o!60)7qP4#Ehos^F;;2@1B?;gP#z*F9PJy%>P z`-n&PdSA~)sb)K>z3mVUN(GdZ0xHxCtM_r7W}PV1MXb!q@`o}45X`Hue&{EDWWu_E z?f7l5!9u3+`=j4x+k^ZyS8wE>d}2HQ_+#7p2OrqV-tQlN-1`}$?RP+U{@8!E4gMC1%8~CX4*v@ zzPzs8M=19cIBG}IhPfu=Lf$->dchy>S)?(G9G=BdNOQ!Zb!V2mI;}6TpbKfyjP@{U z>;qxm!Ul!|pLsT1Yi^acm)Y{rI>*`ml1oE4=0D8En3JVp>)lgcEEuo|fnOGJ%_>f~ zo-JN`UyomBzUSMZG?DIc&D@?vKb3J_c-k?b13Q-9n}wNb 
zStFmZ&__7hIh%>NwxgbA>o+Wj>YJ%}kb?y7*E8B8K*)GODX;7R9}pM)RC6rG%|>J^ z#ap_cZy)OF@(%-FC;kJxv(%Rs4!@AM7FPxD9XWouAGF`nm3~x$_p0Mbm$re{avyTc zihlDSRzOZ$4j$@9|7B-??8iG+fnWmYEw|mGe(`mCdq9}{mfm7vKX9LZ{?KvTRMWrm zZcF%K=M3^amu`GRD|wH{xz)e1bP3$EGRbNa)X9WAx7k8|=vk|` zsk?8zu(jXgmW}+Ve}BpMSfa(Rz1+Hf?B5^si!M^xJPR<5ILx|G6%5yX(Qf{Tx3~A- zd}f&M@rAZ^$@g8Gh10ZYqa^)a?PlM5H}}0jDEJYXW3o?kRZ!P>ulX@It?etd0(bO$PY069GaT z%mvI+SnvbD0$@MDKLBnWsRxYnlU@VpqD%l{n3cA$;nA!}g|Y**1Xv{v0|*NF8K4St zF?7G=6&*4}egPwy<*ER%1DJyTnYlRnvS0n`R{=apeXxNFnTu=ULH7(%9}v9IBe6MQ zCEMTws0yGIeyLwbpbvm&5St?a%aA73aheC_EdXh10rm)^F#@m-G;-v~045~O%$K#{ zE#;ryiw|iwYi0rPnU^-gnq|BI9HKp?>kZi?S+0y*-7UHz*xM=C+(W=4MctfN=@50?T0qWWp!#h zbFSS7zXtN#_rCl6Vry5FbHx3PhaU;=vMlZq=Ge(oyd8k&x3ltD%>>93?AeJ=KkL`* zzpU-*-q`=Z?svQS7T?_jQ?s@1M_m59YFuAvRSsw6QEddl9%2VumcLrh!~=mA5G;MB z=$L>%`bf-Q{NiW2);k1mJKCafJPJ==WEY?vFBTN#F;Gyv zC>>EwG4|LID&}`*f9)TK8Q87@+38Yx`RAVx3-^rAd@uB44A=J~E8nWetat5w#lV=l|B?glmp*ul}+qov64lHtny_6BQFnm=@Ss{#1mGrHa*near+i?x=D%;FowJsT8IRB|1M=RWV5 z-j!~#d3i^bD+{B1e`wqEc@|q)2yLVuP=ssowMe))?eTRQ7C<52Ll%=zsPmq=4nO8H zx{l{o_yK^0Jiv9zAFIaSet$wWY~S@ORzT9Y>)kWArQGBX&=?D>%sKRIPIc99Ngtp% zEThb+X=Tx08=mT=kwr!Tda`DN{DmbU7FwA{3YdqbTNY+xek?r208!rYJ!H?ZkgeY*5*(7nj`^Hd(L==mFR>@N1Qm z4(u%O!fia}S6(++Zms1F`0l6)Bm7~nZ5x1HW6mAz$2?K7lm>8aiS-xo3w^Oi)oR<< z{yEFKUbpVf?|JJ+et{Kc>2WqR%WvCUux7(`$u}Zi?51AV{N}QutpF?x^<1`*YPr&P zj-R|^u&9^z0YG>CHeJXsx#5C-;Z=M1z3%B%yKCB;*wj`H1IWi!E(_dgvL z?FEYIu~>^g;;rrd{*P?wd-k1Gx9K81^`%V}=pkYN`U@{VUybv)Pj~cN{%keh;#z|4 z3t&t=y&B&`zGZg2YsfR-O=NEJ$~PB)9^XmywA?dSvLhG+I1N91pZTUUuS4g-GeA-* zE6EY)Aps{y#v~q2Cjq<%=mQy=Jj!C9!grPnI1msX{Q=^%4D?I@_ed9~v;cfe<3~@* z{FwO%j~6Htuj)!M8A;EJ(;bdNKj9l8Uh1HEZluL8!g4nP>v zgN_fP%_+}v8IP34W?evKfW8!;${}?DZb2;Ov)%$BS1T>Z4d{+&g8;GsdtpH_P_(yUktLP9=A_C~1WRDy45K94=!XaRcIpv^E#}(Y5kQdr7V3%MUK! z7TO_(JoKFYM>znZ)3ekSP?7Hf@AO^f8wH3H;(HK>i_1+`0pmVRAAmT)igAZ;4V%#F zQ5;8k-Q--{>aZ-kvJA>LP_}`*Hh>YKqB_bv9_91-vwhY3hUCP-01;!2*(YA;lAQ@#!cx=AC!V@d8+Zf{MnA5&}gC{w1Jr zjWDlzF9HB85}(qIV8pRl0%+=a5gvJd5Qq`t1j1|&PXYH>KqS6h{&YiFB>vX8m#gIp zP>x0CpH82laoOK&V9`O_Z0lFF9cZH-crd*G`oG79@EoxVkdH7emex2QTD(5;gO94u zTWm3&=X>1xH$SkYn?L7~N5lOVKmA$oGs<=hKk(Y?{tSyBf2-JT-qRnMJOHX`BOajV@!Z*_17$Z0BYIEO`lD7r)K$2pT%ilXCUJr zO6Js`?om2oxrH$Rg^r%%{_u>-QMogw0lcdmnHvClGq3gg?$h7D^`;$%F!OJIUB z@7Lc?7;hHrqV_o_UQeI8hd<>9mCXQ3Kgvoe2Pqd8a{%<|ee9xSi_7@7+iuJB7l)Y> zt^wa`cW}M*=v#+kmIV^#0`bmll)3a-7Mo&=6mhxKi=%wsHG(aR3aMgTWuI4Hd}~># zl|S6nZ+g#MLY7_PaPhSOW-M-KarT%`Cxi^7YvZ`L zzTm=H;cf2IF9P4iaTmRYxA?QbgkryF*g3%;)^D)1N?MeUIU!w20}Eg*MC-nuOFN8@ zKx2`chJw`3zu}tfI+1`r3p2iR1V3gAeInP0N8#4)rYy zR%AK+VTFx!a6RUgZL9uxw(<8?_QNk(yUnw_G{=k6EJ|Q$CidH{r}JB$IWWYt+P)R5 zv+?`+`FgRzy+XMFu*)h39ed2X?u&Qxi>}ej_gu=3zBTKJs%t=eqaXXS|Nhjy{p$TH z8*W<`>J}{doigpx1c@uyIbb`|P04NY}NV-u01JUuZFNL43v&}VCb&f;)C`N>a$9*pw9@?*WcvRRX3fB|u` zVLk4jN|#)8Y>XWcU_E%#mUvG;CZFiwkRwPx{S~<)3-~CW7&IWC$}7Ancf#SyA1>_x zuoyrwToF13%7^sutJPLwe(5#x0f04llaKVrxCF7Go4hH$*d^WcU+Ro&$-|6SX?*cX zM3f&gW6UePW~>=MetZD6QC?~KxF_uZ-jIFM_;eq)7d?|3mm_1jfWGi0|EyKvm;4|* zktW(TeS-QI02@E_5Bxxc2Usfu5AbH44$|74(g07&7fY)E_^=MBcj6ZU+CRzx;|+R` z%NRx5VeI|jgAYQwNPC<%{E=6_QINuCm5q)GPK_0iM#`FTN8w|ab_ro+lIP;hctyVh zU`QTQ5HET~2+(dEjk;)5r>nd!Wzez>lx?7F10CB2*yx@olayuM zv1L84m1n(gxZan*J)20)Yn4Bz3(Wlnhukfr)+mLK6z zC?Qm8ajv~Q1>A#oH(Nv$uh$pEcGD&qqJ_kl%0DT|)fN*iOAoS{Hqn zG3y_z8+h#OmtXbYx5ed=S6t~2HJ}~5hF^MF8}Od^3%J+Ub~vA5yW%$b{>lD`_s07% zS6<~`Gvy5aj{WaTe%~E;@L#k#d({AAm$m};s>kJ8npKu62Q0yzamE=z0jPIHT!qIk zfT~L_xuhX;!dPs?1ipm(m|q&d-l4DpFpIMIlv7TbO)yTw=^h0&fD4Edj{V7NtFk@ZvOKu3qiH=@_D?A^RD;-deUy=&Y_DcTPV^*G{e#aa=pcOp3r+@C*=-8_S@XA;NZHOzfB|Iv^AR%iayk2Px_kKJ>(9slP1ZoBVZ<^MH{ 
zYxFNK2*>MkRIWBhv=_LibOYRF-W&t|^=?jfSzu(*nX*1IJMYrKj5G{QT}FJl^u!9YnA(|?R}X89UM7HgHaU4^zv(Hf zgzGQv+3}Y1^Wra`n{_I)vb<;K zU9)43DYxFsb@z*`*3%#U#&+S@Rls0zdA2OlEda&hXL;AOJ-q|eA)t_+;jVM|av`1T z9=UwgZ~T6~y|(&j1DxzOtG-kwC!Tm>=-#9D9aSy6UQ2ba){g*uxCcMKl|SUAZTw-c zZx?`>9BsRF?`1-`MHgGNda?z;fE2)__;3L*Ku?W(GNey&?0eVdzF)Ul*2)0fAfLid zR-OP=2#gZ*jIWUsAU1C?4J0N^697XAg~q6`Hj;Th#hc;?$2mB?ll2yhFVT>(ELQ$ftZ0b@|E9RCSG zk1_+`!ZWV1aL6Wh2pLY-$er5I%DYjcX0^Mld{ZBQna~Z#bMP#`Dt~lYP~L_w+EZ-# zG3F&6Ito(y_t=lFX`29F9)JAtP_FRRJ6!QX2LYv)!4kQ_u?a<5&C<* ztL0Mf04K>SX{8KP|KhFZfOQ1Eq>WLRdY8I!Sn3o&8N|j)+xgG-*6t04^N~ z`m>+?EQ}|-XFY{JC*Jj3)(il$vN<0j?fjHKp3zrG3u%gfln-%cf$-vZDKi$7zbMNt>*)kq`1jT=Ws@2Des!%8DO3oj3p_pxc5Nk7B=! z?X+*ec~hoL33U~}Ctdt>jBae5oG}o&6H4^}#7UV$@D)$SOZd^oSzm%uT^fE#7i&eN z4Ztbj!gP$lA36xyR%%UH@zLIi^A59=NBQbGF6|uBcoz3b-tTXQLm>2C+{f?fpR&xD z_7~bUl-EA6?i7>yM*K|IGM*8qyS$!#E>@OFStey0DBD1@ZGiEdxmP-tH_NXKH~+Ln zeXHxaC2-FsQDw*I{4xO0g%YA82eWwcs|7&nGJf9Px<#18EIL9MJE0Kv0E-|@V95ai z6QBq}9Lf;Pi6ejjRv;Y+_Ymnofg=DP0tbsw?8uV!2h69-VqJUF-E4S-G15u85mMMK z3ecjSwL;2)1zqw(IZ)4Mopn|Kp#iRBp-ekY*{!JKAev1l{5kRiAqpPtt#gz$)D6m= zEa9rzbP7lp(ij2vcn`#ji>zI3d+KS` z1?uCbObgdNmzt$yReV+8UaP?SZCgbAnE~@ovUty(Hr;>6+$)}W*6(6w=hw8h{2SZh zdx||HdMA__hrR+}NA<6FDX;js6mIE`wKNR(3dh1JU_XwzrcGw~ z&3Zw@ISsg%mLKhnzKCLr{6Sa|V2tHQJBk4YX_=+` z()V-HJxZna9+f*Mv<{y1z_W&y0aKnXWYx^m$ z&+C5i#alueA9`@>HfdBlW77cbkufCi_#NsPAQON!1UwY&0GZ&yIENyR{E6r1mDe<0 z7L3&A7#o=zq<-@5^YX&n3a~Q3u$_0_xiG)Hc#1q{e#N50?grvw14fZBo$ZIQEsi6v zykb9HajkSPZ(~yob2#RxD50r0+ARw3F=NJrBXgJ|vJ;=hHqytq4Uc#p!(uMqB<9P6 z!yoek%?)eKE9N>1yQeAM zzx(`)AEDj*Hl1=7vJiuz9cyesmA zMfst`fdsyM1LhztM(mk66pA!ybJr z7Wnes7kM7*fA~gc&HvW@cBOsb7U4O+_yJfcP~510eJQhS=<5K|RnGcmJocZ*LZ4r9 zrM>~gM_r1ST!5KJe>l=_`|GtbdFPcgpc&$WUYhZo$66!D^}=`LW$QG=D~*GBAs_~1 z9Aw+{eR{9_@*I#(Jy1*uS@hIw|Cly!@Usc+Aa)z*o75h`V;%DgDb!0JMN5D!&*Gt%7EFx{R zE!A-xM|_QcK%TUTti0k^<%VB$0069H^^v;6+86DYK2DwT6A)E#DX#)21HOWod*fH( z*>j>ithCgKHv z46z0mhmEgEQx-TCKj`fkJLNyCE%A<1H6YgUd_1#hRT511uqk&?w;j~c&bz#WtTD12iOS{ko_djj44>O!-=5bfMY35KE<#S7pz zh;c~qopAgr0d#cTwHpU_PO!L_=8?3b^rjwAa&UwI<)U96Xf%tD@;$e>D5U0P;9gn} zfF%HH0i?S9_J{myubNWJtU&(q{5`Dh4-0*Wew1gqF6EbZAGeM8I?sQt=PRvbN_b0S z3fv^5h!M7kTk5k4L1_r@D<44WR{sd-~Z@^25XygFg5!b$^kR_1*u zlvyYwFT@AP570c}NWqxnp5ZTT?*U;CM*WcVOAFdvzlcD)16;~PxctG!aX4KojP6S}UzadHl+E?mU*Bc{FH8(N@n2q7rd}u#N4Hp z?j?@5J=a*FLO-1H;M5WHcg#1?@x<@ZO#wo|N*S^u02=|e$UjyB@sErQF;8ZWjD859 zon&IknF1NbuCC=Lj-PA5T`_6nOze;JaSz4M>s@0haHpMi3NmfpyoULk_@i^?=;fF% z;1)>p=lETET@P%;W+I4rYZ^AbhYw?2{7l!#{JF|{J;N0Gs5rA;7Am04E0;7wVHNM{@`N1}=VV5zOee$s2?ZAeB6NN3tM z=|_|UKgt9^DtzG258zqKH}4(`kt$bQwKeg=%^M%CyY9M>RzPh^k6d^`+(SAVTl~mV z(Vx;TV-D0?R#^NYOCvvDd+oI$Uj8XfwOsrWMseX%j&V8Yns=0w%1wM}E3q|E{E=4b zkZU&LLMju zm#anU^J)03R0Lo@HF9|S~57B#p>5UIxxWt~8W z2oeyBvD%TuPBC4hsG)99JfMJPM_4`ndJpiAg*z!Kc-|3Z1$aV>&du=43}b*&(s#@! 
zlft5Kf3rNuLh=q*RqB}Q@7eFq|F|v$}V81mxqHegOAa z+*X;h3yotVbl<2;UDGD8vPD}bd}et?apDJx6$VAZu$YXZ6Xh-auV`8U5XE^c5|?<( zFAKFO1lZ+_61rBu*?aH3L;e8nh!3uK#m~4-fqMXmHruQM*u=Vzb^yXu3O(`IYtN+u z;OJKuu44)hYZDfh-<)7^4=ZITMX4(+b5b4vpjfoXDvPXV%IDndqD*64BM*s-uw%7|-&%fZ0ryBF;|I!K z6u*FI^LF(r4wVCW)iV|_Soj6z983TJKmbWZK~!MThP>)KtoNU{t2DDyoui3JI~Iun zXEmBO@giJ&-6)@;{mRYl&}2oJibo-b=7^5G>{MGF3i_B_UT{$ z`d0vfW#xhX93QC+XhoYAYUOD*;GWH4u(AiuFe@R~*}Ur9snh)LZoJ8l8v8()-+b-v zdwriZ*7Q65{*U2)RqMAcW?_M&a=$h1W#4De8ncyutu*AlOUsCbT;>MMeF0oxZ6Po0 zob;TzMb<_J^1~wL=aCN>mBnN z0R{Dpxmg;I-U0N~Vwawk?kqWEekqHrb{7V4$>MVZq!n0<9ds$@;K<}7| zmG4cpY+5()afG!1e8HWvQ@CG#_^cx>6$XG3L>w&W_gT1)|6u0()$m`rzGBs1{)@VN zzo6G%d#(B%ED{#LLVIO2fm-scofk<*d;BoBgV@x7WnTa-S^VO91?++!bac49i{EG6 zXYtH>hBYsA%*=7nqfsu9(xrIrFbiH-)Q)+ee}vNbV?XIN8+f3USL!dmXFi0CA3wt) zHyg9#XIa;jT|F=gb45sC$1FZPgOBPU^~1ajYQ%arpgk;mrM$SOd_-8Z<5?r+j2>O2 z&D5&PtoV2a7!*R+o)_0pvqBzIDudBZMCdy6;^Mj5n*8ShmGC=m+&Fm)@}kmLFHeL; z50OOxW9#uj=MPcuDFHsi0$L+u(${Tsexo#P+O+CvB3WUTHcoG;Ro=Xd^Q*FB9V{g_ z^XKtOd7!&bDFp(?Vc;u3o2;hly(}oEfHuOAKT&MKkIjLQ){fG2(+(j{UrOJp3@{)8 z$xsIV^e(T@vpxr1b=6g~@l`zH0XR36gLSko>k#rEyYwxHbqo{v&m4| zDLTIln4k*S)t>T3Ak)#8fax%&FMe1{(*iOAZM-9yol*4`(!~b>gJaydXP1+NPIk&5 zV6bSUU7@6f#VzeVLP-L#DD>QO&xLF4%3?A8$tRx-LJ04%7$@O|xL6EhN2St%6+6YF zXPxN+Tth&5;)y4`0EX-+t+j(s=>b4_-+lK5>p5ha#afhcCtUQ0jMKQ9B&IyZht!prSJM1 z_#@x{z<=$od&0ZJY=0?mD`U_C7CUGd&{KkC7U!td~R->ark7WdfcMmt6LYy?~xmpKcY)O&m!V7)S? z?;)T942gLaxn^f_e2hiCxZp{AQZyIwpOp?KX!LEAiRli()IZ}A$1U7-*Ik9%v4DF3 z?w);S`v7t}_?wj@i5`CV(a;xAtO0t11VY?&(+z^(x8JIO7}r^6fpGtM0{0m6CDHcKt2}WT zzbP-~U$k#+c41>gBZ~;1cbNk~)i;%SEc0Rjv>F4IKINTz+B0pOV>i{qKJd@3YDa|HPALMFx4OBLInU z`pWCtWAEHjK4C)9iT`Sq!x#7ZLICu9=|`(p+y0JMts9&uZ}~-?gtD3b?oB znFIayTjm4z8l7tvsTZ1E(QniyC$AjZMG4$%Ti-1UR*ty+<}<^Z?k^`85N|fno_$lY z>QDem{LcdO?HsNr&HOZcuc=eMYtV$5XA_@B3bypyczH4LXHi3dyY`eJ);TyjG3EjPSh$D<75v5T;&6G_2qyy7 zAS4KsC;vCybW?Tl1>qC|{DWTrU>t>j7x2@$$}A2yWcPL#Z|*S zN4AaLeK&vh*=Gl#T|4b$0dK!Y{}cT6UZG-HGLA>rvYdy@(Q(jBQ*_;J7GL6bL(e(4 zYFTfH9oegQE1DvtyQPuEZzwheuQ-ajOkQLiu}JySPZ0P~CKQbWWdnexxSbY>YuRO& z4GX2@v1nYWKNhS}Ca^OPss&tOQJ5dQ>BOfjF8+O5??B@s39#EL0;O81%Y54e_woZ9cmZ=hJZY{$KaBok);|@!< zX%Bh*gYWCai67hd^S^EUXXXDp-#MwS@;)nE`YaE)hd=5p-WY%dU5*`r*on;okG|_E z-@JR~ODNuHJ17C+3DEV3BaR5y9Eq)CR`Pgs#dFmWa|QAoFA^}`bwwKHr8xKoGKRAN zBx{TuWhH=6{GFRq^G$ysZG4+iiZj-cAC%a%=PZB{z-4XjP`;RZr1Njyy=97gfO?v< zXr6QYlxhAWn}0(9eDS-zS3d~P7mSM9&=cDuVZri7hG6X+8TAq zbGiY7ITjm77~dP^_j&qPpC=8>dt++`ot-Yg(R^o{jR#PuEGh6EjmK?3mGMF#V;Ur& zAhJSM+<;Q4%UWrQ%PhVoOnP67&)oKoZ@|2qzpki{&IBP&N11HwAu&VBr*Yj2i0)^J3i^euV38hs8(rs&*NfU(s_ zEtQ$RM&q7W{CDqD0q#+M2R=Tu;j%e?@{a!K4--p!_T9)VpH>Tedbh(B>-wWV7~v;O zAMa0@`t{InKb$$i&+yZIceB7h#r^>9k+yyB-P|v5n|}$UBtjF!V^Uh(;#U^HW#Qj}$fEPB#MX8e>-_^2PCmsA)4ohiYaaLj2% zv`5+{aR{_Y+(mx$Q#Q3jS^nV-VZDcRD(!N4COuQ|*!qvOD<3?Pyo3Bn`jvlNc%WyZ zd=-YYh{zXhm9ka)lC?sBh3J`imj#$(^PYGy=4-4H*p4`|fUy@{bWw;N@SJoBtgVR` z`JnxgcK!H{K%98NGv!VF48s&qmX032l$m(RMHUpHf7A2W6@PTN5bJ$gy7?7ft-i&d@oX0V{ZU0%Wsg9#4Fmmmyf;51~cCGwz2D9qRAB_ue)xJu`uK7bq56Uo(B8%hMai2{ME=TjmVTYZ2UURtmN_P1>!GLXdGEfBe$$ z@jd0Z*Is*tUCUUqrLQPG&A3v=#p&wm>lC=RpjoI`WDx-QwdW)~dN@;*&H4^^@9sjl zg%|D?p5ylF)y?|R0znzwo_y5;?qMlFJ9Fc)gdOZ6eODVvL#*(J4{eR`xCbA6uxHWg zkV6hBJf<`fce8#;Ct(3bGbiGju|P`8X8F#~;g~D3@P;B8m$J|t4n;f)g7}+>@|bog z7ypb6)Pt@&x;)_zPsTE}fiw;+cwmuP??^Z4=R2Wia#_^NI~w~l`+fu1W4?p?iA8$Y zn{WBPy?TcD88ZcZl%4`hogrd($~L zhi5@7tGwpsJau*2!+_R_|7zt&*4~DlGpJ2}8-B^!0pPRju&%SVSu36M`n%P>zLY=y zxv%?e*4w^KJt*IuH5MbYW__C#Z|tsjWzQZK$i0rtaonp7CP1 z9{u3fewm?*gzFKPu3a}y=54V~Q*qXF*~|&$>CEbd=RMzO03)2V5+AQvBy30f$?AIm zOPFtyM$ISEcqRV>jzKaS2ffR30ia-=i&I6giX}fOA7liq zk&~ZT&XIq)H;blm_(f%x~Ya 
z&DO}!01Rt|6DUfSCjqVj{1m^~MJ`vG;>{+sCEARREfrWS4$C!ZqisW^jlAi^8u_J; zSPKyt6MxK`>se%EBmGLPfKK`bb4ND#i!UzUBGO9T{rS&-4(TAj0yz|KVaR3 zA9W!sY5?s71PlyX|B+tmQvfr7XWG02FpIqMBMyL+Y{HG#hvIY=y{@(S66@T=M|xG( zN&_y>NJmT@b4}dJH~GORgots1pNKvamlM~7XZ?r#!=L;@v~$KXZRY2M8xd<|v<=3e zw0*-1{WooZdeGR0@d}7Bob(bl))N70g8=0bj&zYG=~83AyhBeSONv?R0c^~I80+E< zri3MZlqvcY?&F`@u`IO5@3>ZeRTrsj3Rn=zGCD$d#%S~$sjR7ZsDJvLHjwE#eU!2T zxL$-5u5{bhZ&@B?d6aFSYy%zH1^`s&jprR1PswZEX}3kb;rJ;xU(2@uve z9&m0dTg-Id*}4mjMIt80q+b*Cth^vFAyDcaf?*2ONyEqY+Bwr%z!r8}q8vfVB`au3 ztK2v}@wML9a@n=11zErZ?3$9mGukYjpj1G(tHmSl9TrJ2N1pe-+2>&p?0_)n@mX@? zO#+V{uDZrAZ&qmVx8=`ffdjX`;KJ~X=foo`fA!)^-vdyA*e#sCt96fX--rc9cv0ri zHP>9zP+S5CA`Gj1YNajf9ZE|`K;A4rdd675;$yXAR@ zAm|SnvbF#BzlMe9E3D9?n*S8IH(`7QxTmyiyz#;zOlJc3)?9PVP&VkFh#k6RVRPCd-XSU%KwJq;Aq4=UPYeQ0`76_5Ek%KKB`}= z*g}ntZ}3aolAht-aKjCs`=V8vXYz@1on6c*c|`(IrT)wN;&bk-Ih>B-0>G}asUVu^K4HyF6vnZK{QU6JwbI(z?n(N5rDBR^1 zUo7Ms%g2<1!ZIiQ{lypiK?j)Cz0U!-mzNhkXTd>>wn}s9X7>#iEYii1X39!qYZ{it z(X)X)5Mul7xzHU7*``R|ClhJx$Ff4OOF-8eC5 zzoi41XQPt_RN8y%^O2XY6P^S1adkFX+pz`e0wD%9RvY|{~z^1l6U`|4XqUBF4O#$!%$M zquvXkMIGMJddy`OAM|Dbbtq?r&2mSM92vk^we(DsGvFXBJ!V-hXLC4m5=XfbM^<_i zH{d0JW>`m*jHP#Sk#(dKug6beXa@kY*ubf0c`m>u+MPCy0?1rC~h4=$Z1?Y|+04VWB9*l{20fhm^<616hfBf;s z+mu(FAFh#m$vYd}m7gN+;K74En?4y+QvdLy-qN_T?y+Q#%)$vyfH9SJ@>v?%+Bl^6=(Et;Yp)&3p6ASQ$lutpV?)0t zzdTo3IOzhOfaLT{{9>2QUi=u}`01V2MF5+|;A8O;s4&ZVy*5MAerapsseXIvsi#&g zT850}{8PBntv#@JCn8WN9FuR%$D?Fn*C5Xk zwk5!8M^oMcX__V$WFZ7A#nI6&$|WZK>?lfiDy4BLeF!q?E+71&TwsA#;Shiz?T$kT zLfGIvR>=@7xK4L7z4X#cLD&_aESDW%lxNnvW}hJdL9y`I3@-qkP?oYp{8HfF(Gx3H zhK-^WvT5VtEFhhrd$+Iu0~t&FeG0 zKX%e*e(yW)3cSB&$4T5eV`lIt%QG=>uUYFeD3Kt9->kI7&(y{+BM&t?MzD(w0UVx1 z+cSP&ef3q3!h|*)r>E#ON(Si6GtUeHe7e}Ke8CUJ5`?m_DF5vYGzndL>7^me>8GD# zz`g$qN8b7wU-i3vWhv`h3z?#!0&05U`QZVqw7>%0O!4r32+O~TAATh~Z)s^ezIPUI z55*az9gk8DqD15Oc~4;DdP(boagOl}fMwQ?e4!X49Ak!lSd(So1jU&bStEZ?nx7@weT&xq(v`3uW=* z3nNT{Sow}9UfT0VAMEPidUGf1e|G%k;NJd;$F~jsvABnYz0sp9ieo?XeCst8o-6CMR2U`qK-UG3~gsmeTooyWD>V;Vm1o_6s6**gz7 zyNc?MPYMaWmk>%4I)qSS3@ss{L+Bw0(n|m-3ZY38X-e-XAP9mK1>vt0Q87{!K|@gy z5K-w!6#^kZ+WgOV-u_-@XYYM&-)=U^o&CJM_s*R;)921P^E+qK`6n-IE&e)Yh&rN7 zwZuuo(Esq$mHx?3n%rsl;<@$wT&X_P)&NCyBp36lKuixm{ICZ YK0jCn1&o`d*M2j-_HvjdG85hFdW9y&8OhISl6<3DmM%O%|-`*>2#vI&UI$a?? z-%>Uw*o(*|QHXxHtHrb>MAi&COExa$h3P2tyIyU+}WL@#T8+YDnTvH5>&Z$=Z&)GZxKG8)$`fl2`YH~kJyqERl;8qhy2Is z>mEK58nkEDkG>-iF|_GJl&8{<^mxxa0w5OI4}k?L)c(%2Aid?WrT=)YKV{@Z^*7JY+^Js6XnOzPgWDt;cbq1baW!k<#KQ zXvw_+9;b4BWMTAIELsv5x{8Y|0KS0s21JWoRqp`zQZG22;6)xEfBf-~eoRMk;1h8e z!_b(}6)ShlyEujC%J|07*R)5VIJ{@xJoeaQD~?aXA?Jgik7fVJTG@L-Nimj(i@r|i z*m~=&TjdFQ)CGV?fR+L;;y-%y=#XFjsqM-h$qnI)Ur`4|A1L}j(FZ!d4V4*SU`caIaX%bm&435DE;b7N*S}R50T*yA5r$gtSQ>rv;-Bhl}53-NSI` zm`|+6#Y`>2Cs~eTBSP9U+-s8*Mv9GU9Q0wXxdQBqg#${PcvwzBn+(5>sxPHiOB&=8 z#ww1Zd-6*e^$g<%!=<`F3zj;QH5C|Jh2^U9p~aU90pf7n!>rZQc4_?d8DpmbS9O;m3w@{Oe(TR$&(zv`(vcyWIVAzj%)pp{@xo2sh%=%R-m|#-J%) z-jz1Mk)AB>5q6**v4`I-zqr?LHRMYn%=N~A4nE<;;J?DCQGTPd&k6U0tHrWkng@iK zI0O*t2Wc2RtNz%qA2Vi55ISh=C`_d*f(C>T)c||YP+C~!!!foi(XRAPf}gzO5tJ6d zUYKZ(0;sj0p{w%?y8hN%hP3#155i>4 z)c~C8od;sz-eZ3l(JDQ@{k43f2uGda^0e0DvUc`8YbVxF-~H})Lmm+nGFCXU1}ns! 
zou4ATY(_V)zo8}0MqRafqwmqKS!>ZaZarf-j>kRo3H^t)03bfT*9hZrY=BO`Nuh_Y#mY0%x@j( zs^xjb?HBdqCT2*j7InSORM8ym-0)SEEd4)K5a<6jNS zH_e|id+U)R z>JfPs{-UKYdDl@UCqy2OO!mw(&n(l-3!mq|@4own&)Khj^{c=OAS**TJ`RtpoHZHX zAz8nQ-|HS;0T2$)dF7zIx7>0|0OLq*t*|=Al6kHY4c^g@vBn3lf)zHvPRd7oMbQXX zjUM?zMkotl0HXkE2{07X)ICavoF0H<&p`fMVH% zpJ(!if*5Up<<)r11FA#$6Tln19rb~APo)#*1wQ~^gr#l(_MtRNxnmxUZwz6ei(DA~ zlrR$}Oz<4b3LV-DK9atUObXx@piLYgG=O64N5~PMyzztYq_4!M8o@6EJO((*cu@M} zfilWr)lPJv3E+<(d3)@!$HKh8(Y7jorE9#e67vRe9L6n%As%_;k&uScqn|1tTzQtG zUCFoFg}H$K!Y}Pp%*7a5p#4RUGD?via2ysRsZYSjIKl%e1*E*c8MZ({pYr1{prQGM zav&oYxK;araruz7Vl@w0Bl(Mgcg!s~WTL#&SS3FBgDz!d9{><5{Qv+c07Yb(gj4_1 z(a$KN#&nby=5ywC#-aS!mqJEDAC(-QcZ!cyPb}~uL)6@>xVn-)^~xSmssM(84(7gC znN_qkHkfPEH5Uq+L;u zoAdf*wePpy7r;GRj1>c*gqK?rLk|q!gNIerM+JLei#{wM(qf0pe8{uGV zSY*RU;tvxOGqy03Fqmj{=@PDM9Y+lN*Bd%yQVJ)m0ZGc2Dj!KJxoOA0g_k*IeyaGtD{tZ+PWZf5}HPkDjX+xQF0_ z{*FKb;ZvNy_+EL%Pe&dgz>LFGx@QctsZGC>b<0ZO^-gJV9JT!FxgwxLFo0l+IVJAv zqNB1Q=)%=1#34RHqw%IuxXwE3w5mVR{O)(w2;jUd;NJ4f_wZA{*v&up_pPiACKzDQ z5^n+dDR1$$Uf`bc5Yylu0RnBo+)BUWr};Jw+u6^6jL>2we=omW(tbq%I%q4oB)mP?gzT5m%Pgz&$OxnY$XX-r9;$~i75^xUy-ky6b6RhS@ z2LlH5^c=-iT%*~I#Sq3jYdG2(p)dkt9fPI3Hd3d|8N_3bm6kk8FFLLW z+H6AcJqP$tnh5K;YNLjuycjRaM~L&mhhg2Kbra(VOUCoBToT$dwv`^?mG^#jjMQ;o zO!Zs;@CM(_=G+x~_VA-kSpquuzW2WW)HVRJzU^_^opDcjIwPIVN+WNBv(fZ9hAwOV zZj>JJ==<7)QGdao&6i5nPYA;%sWy+}JOJiqEWtxz9iF4};`g=OQ%=6IwS=AB!ccFr z+%vo7y)*O6e9)1uVp&=MMOfPdbTsJj$XZd$yUi|NBfMLA_a*1Zep<19Z2Wh|`ym6r z?2jJ5U#RDShxYB1G~;=Y{+CtGI1KMs-)Ct*?&Iw|psy6b7|~&E!MY;` zmq>1{JOOUu)DxZw2VewnnBI$)p7X9KW3z6L`GQ*G#x$YBH~;FZuMT-YehxTEVNir% z-vH2_WHWjfUy<>^lgiRzUS3ch!IBj~AH`Q#+7&13-||sVpM<%%qlaO`)`Pf`U(>XEv~0a#1dy>VT@(?N^yghk00&{Zl!0B9y4A@u0Ox88bd zxsJ&@ZG~(V#Yc{51&oRe9&jD}EXS|{s3Si7Bnr^+ajtP0IU$Gi*ylm+O8Xm8d%^-Dq)pQBgo7ru`H>FKX+C(S zols0CzgV&(@6>~2o>(FT;J1S*@GD*IXJyet&P^TA4$z@qp@GZ_fGyVg;K2Yh6Q6RB zHgZ4s`B?T%-k^n}3~72;YXn%zcZIx3uFnZ10uiz|#+(qxg)hTVH?${)HjwYZD*?QV z!)gzYqYJaj&Af(-Y!ZcP-Fh(#{v!ec3@ahP$dedhyjV2dD1q-V=|bayc4q=YtOR`#$-UKj`@v{8k4YulOueU_n0PrS6w-%491Hs z?saPs{0|;HU+MjfZ`jqeaShnC<(7+<>4`4aT7i3U9yqp_x|h}`!Z+qf=E0aY_i?=R zT}M1KKLLK?NB$7NAy@*q%{PEN>c{t}7K>CJk>>j?JA`R%P*kv__)&5%_0Kfhn#e>?`H4U zJn$fY%qO4vjm|pTkFa&@#>T6#24^i!{wmEA9o2^zW6TT9s_%}b?RN;-P@m-1=Us*@;jg9^0lzZwF z3nz0*c{-{bVZ!Xdf3@U$!*8z?06?qm+1lc_Kt}+z044$4s)b$=X684zmbw_c=9m0& zj~o>0c!%58^S%1E22|BTqdj5hesO%1Q~?~se$JF?cpiCa3COwJmJ7FjZ|_=#pNpPj z;NHQ{jr2t#s#yHp#)4UV zWM5W30XzWMLB=WN<*abE`XReyU5MP3IB|YzrBN-MXaWcUgu)S|0(Mo3t9X^d@D3go zAP~T=YD&LZw3H6tP4blm{!^GVKC(9)YyR|IrDs5wIEBfhulIRZ05tTs_&xgr0J;FK z!FM3PnLd4b85|d0Uj!hE(-{C7 z5k_h1id;eUD`2GF#WdMJAe=yZ@F;p8hmY?CJR-lb97W|}oE>q*5y1)?vQFf&fYu~a z0@#E@rheaj_hkb1m;*=${(0K8X`#I0L&zU}2GAuApkZ8Zab5|_y6)hE4-WmD@Z_7m z8Y@$gC(^@m8;-UV$PgOH7g4m9^)=cNUJ$+veh{D-4r_npnP1jeV2-11_yK%`rv==F zBTX#BF_$qO0c)u(=$Cxokdf9b9Ag>K7(eQkvI3$5@R#?4&)^*ZIlyG3orZ-5 zw8$e@(!sGmM88&kP(oE6$tUe3Wq9~k!qVqxFZu*|B(L;mK!en=XbD`M&JV~;IYova z3flr!0(3-c00$fw97eKl@R6ahsCP@vjbSAe4AjeR`%4IDTy^fN5v0Vu|wzOMdGUa?%L zdQ-gg$~dI1knzWR2#gUN-^X~rK=heMaH>Q31I|T3nv=xDQ|jn*+KBvKa>*rOUJV3V z{-yaUp6A~6XwmjX+ZTPHDekBW3rtwYt9z!&KBOznCbA|w0oQhippVC zhh>j;+|U-b0p-jBiZigCv&Fu%qr|FGGz}?dCb-*#GX~ii6XXnt=ZqmQzc=kpXAG3% zy)ntoKoFt+6u^!daAuh)JaV79xXh$&!l@7>_D|zfdsbXy;GJzoJ7d(VMQQV1G;DLu z8MEt5do9p1t*16&vpn9g{&N3ehq~>r{Xuv=Wzr|^(|6x>FP(mxyY=*MxmVU2=r%m= zc(<~Z`2{QU#8+Q)qknRTd*hFPbiY3O>u$#6$!*Berl$4f`uFeeY-7)vAS8q_OERUf z_WldI`<12rbxZfP-~ZmdaP!S>{vCF3Pdxs(`@>_8xr6rD!}Ybk{?JDg-O^iZ>E>Hv z@iysH^Pv6hXhmn*ZsnllEq1Cu6K*-v@(g3s7)@tKsKvo5-ZLg`^V87=j78B*BW3v; z!|zPk5ay&beBQGG!kjVG&H#zd1PRWB0nUyHX_GExHNl7b%2&SPOo-qpmkA>C$hPt~ 
z!Gp77LYxVLoLK@Zzf0@ys_(t%1`S%wZ907Eau{x?W}E8V(@%fqcHZS-_tT$j>gJud zdkFK}-+t64udN~{4{5!T2NSG06XG}%)Hwsj1$sndPABhKF3o#Zm|FePJQ2?XcFur# zfdqBOd}?42X99L-8|WdACJ=K5+;avHtu?7x7S0y+Va_zc(JaeDoN&GEXM&MSGwpaq4RgTHfBy6GbJh** zU|{^vk4(rJ#;3C#t@-DF--6#4qAa+;BF+F#<>*=cwEQxf+AU>c{ev?>tTPacvkf$7+T5L%hiVBK;47@bSevmfDO~L}Kp)3xz08^o z$C{Qg%NmR|j|rW_dM>=I;~#55*3GPS>Fc(s6lk;lWsWu_2xo$3XBM8rTGNisab`Wi znWc>IO|;`=Dt=FQ>fX)WGb37W*aSiUf9XN5V!fcnlhPC|Zcvm>nMS{)=ROgb)Jdi zbcK6(BF3dzRB*OQ7S>Ab$(LDwaJB&w=IQp(VSZ&!Gi%CWJwrbK^Pm5eNmAdo!Rl<& z(Aj2Y*DH|HIC!W4_Tija9zO2F7v*2$-4U z8O|*-sJB~j*pk6-y7h;`m1>|{i7ki4af`3Ipxffyb=)0W{V6;zJg}D=bNfba$|qCZ zZKED@Q{SszZ|jd+(M_K+-OZThUEd*#yLI>P9}@cLVnC z5K^0USWLnIPtFwYoGFFH-mUIM7fm`MQw(jY7251SgcjV>rkJE=PYdHhOIRkkG`IUj(0xxeMn2-jtM9JynxamD> zJOfq*I>^vXp}(A_(ndxGpTL^kfK@^E4IR}J@&r@p4bO}R0|z)dC^zWT`{`MBWUX z72 zX$Kg8$JqMhlTVzT0OSmS=lG`B(XnCN(Pv2uo{PN-cp~N%<`Mb;e53*3!d{9gK9{pY zcvI{N&(xDChlRQ}1=-+dz|bHsG~i~aH#{ku0cyiKo^a&JESQC{ij10Pm67n=Bl9-! zlru#$yvmOK4`kG4@z5#F^2>}H+Rv259Q`wOto!LZ(1PcozZ&4p833|XeyA55>A{QO zkXiGL434%S4dl3tF**7JdtA(Ov>WwB-!VX1xRN$eod9$1s7*p&6F>g~=Ip*+(jn|{UlP~x{ z^24{l6bpmgnLHz#z|mfm%TDugc49&3hiBNn9!}#kJlH?dr_G>GJD3$qX8_y4J5omG z7ut|KFs9YlXgAuAeqdnV(2t?bSfP(GH#2rAul5|Me^as#a#jO5DhuUnnz!8m#!XAKo3-(#oZGP@~ZiazdK7IEcch5lwx@qsd2Pa4+ zgk<70P20Rqt4<5!aNHs=+-&v>+bJAx-rC?f2k$i8@BoQh6$GdZqh$LwZ3?< zrP2MsgWM)3om@*X%3Gl3JKCk?Cr=385X8#a5d*pn^@X5of?<;(kQ?GJba0;b9AAfWB@UxNn^{(d~c0Z>_(1ckH+U z?%3m=v`vn-8(HU_|B?wae_(-7x;X+YY;*E0d$q4s?mWB|WGAq%(P z{(xI!jSWrk_^b(&w=rSzZ%hdHyt3DF?N*-4@K|V}6>LtOW@D_I<^5x~#bydV-2|TN z2jMP)IDr=%X%_^UIE2{z5J2*crcZYTM;qZN8|xN?fo!U<7DB+wx0THWgq^d>0jfbD zjgZqUa%9erMe4l0Ce8Q3*;6Q}w zeAhm}tKlE( zUtA12H|s`Wd}*VhBh#-Ti**NK{lMl1^@u`L>>4c#n_10pY{W=`giSa4JpB`ZAsch# zPwO*n%4$xeZ?lOa$Na;_85>HmQ+WL1`#4Os`>c44)?KxcFQYn_^E5T=YIN{d+8^}m&&n3urPl(`6ai}ajUyg-}{mQ_Y4p|lf6*L;@UtB z3^0(x(n~Gvp8e~?Zgn>}P?-M3%x_==B6ZTW*x_xMRG>? zg%pTY&U}R%_zHjS8D~@1MC8?4(}nQaK+2I!jA(09BF_(e#kn>iyUdg69B@4k4M%YJMux@ zlP5qqDEZT;01ryxU1if1n#{3`ANrPfFX+Z$*pGl8AYXu$s5i;RD5qHu3+q@wbM$HW zxYV(K(Z7HHAcLVC^c6V(Oym9~_zL<9w8 z1h7BP$iJCaxJO109r!MO^b4-!n>{kx49C6;vUA1rNec&fkZ|yRyu-0SLI0)S102+T6|!a0pbf}B@koa@03eBDPJ%XoY-D4xBOLP_ zfIapvpv&GFa}l)JW5B2edpRf@#%}D`u_2H2f7*<`&G!|>S?V4DC3^@`Bxdf&bJVqD z2zg-&|Dpzp8YpU@sDb9x0P8Q-Xb&4`yKqGfG@}N3HiJk(qo{$b8UX0TLK$Xjc5^To zFs=ZcVBTP05Hhe4$l@I)g@rndItyvaD5e@FfQ>4EVhAg6F;m7ppcowV5$3?O%E6F} zVHHiiYxD{o7$|-?7<|#zHKh%MhjtZAf*g#Pm>f27VG3dL$S;;BU?yaVC$>%&mtVBv ztMUMog@z-J`jvkiw$^*nM0kMqAAJ#Kvk?a!7<3pcv>8RGk-R;5?sr{p6Bry~!h?5i z{*l}C+h@2b2GV%y>hG7bW@#5b0+1{R zKyd3zFEb(Uf^NDow|_QJSzpsK+uwu`Xq)jvtMX6AxG!%OXFhpzU$@Y5rT#jl29OqQ z#YQH=7S%=C&hm%xMjN&2tqjoGyEEHFu!Pjas{*} z+LTZCXc*GJ058JGBW$e20dPU6f{+728Nw9NOzRJNYWm*2d*qTwCb!?deQoUYbXQ#Y z5BK$>)^MkuvW(SrZWI${AjtD>z<>ckaKM}gKn~#mLK?~sKo!A647SRrSpOT9TQMm= zLNWw(A@CF75tg$F2KbM4quNb#Bm#Ye z^(euhjn4XwwI=DEXjWi9pMQaJvVUPBh#UfZfKymZAif>Z69o6q**a&+bw6-B4;vnZ zo_WhGAfdi|$(QD3SsY>=B845oDesEYj4Nr9j%ahG{Hzu=&Fz7r?J32d?3%X5A!vA+!<$_5%Ld60dOZj ztP3_$Ceo@(dK-&@N~F~Hai>kBJvvSj#?p_lIeZ^&Dp^%ixTerurn<=38ZQ{K+# zFLPOg&M-x!M^1mC4BW#yC2rxhdb#xvUdg@r=l_LY@N5|`gO6R&Ex7Ew_RX;&G~EYK zYQ&8S4qlt@FY*iIeBDgMu;Eg}+~%u{bkBW! 
zgnRzZ*WEh%^$&gSy(daC%bAJf-vO8byn>7SR{Sl^k6n6${11y{05-@c-&X(;^|mZt>0KN??_Ob%5hAAqjDlb4P$WnG zh4LE?>zyb@)5hWjM2m937bE|P`OKImvSYwISn-wRb(iFLMh0gSO)xd1GBo zy;A?ktI0R>4~~2iK6bIu2J%e3l2_^vrQ&Lie8eX$DLnaxmjGBuokJTZnKrx!JT4Av zi?lIi1TaT<@-1K=07~i?Kl(m#3B!C!J7EPo~i^auJ2z(TIb z!5PEM8GtC6x5+1b3h|&tJF<_$7+{<-o&m-p4<|lz1mIK2Hj%s21_Mk!O+TYwV%?86 zrTw6Rg*{FJ04N9uk-B70gFZl;Gf$8QPKkgvV~epUKA-VH+fzRBOkT++d?2*gmm|NF z8NPeu$dMs^_6F&L(85ud)CK(m*?@q1_(PK)_vDp$^bh!8JF+=^$0#d5_Hww=MzLe= zXS@U2q)x~e^l8YpU@sDYvenqLFV&wLXeE`WQ@uiUwm zVmJHHR}`sfaWI$GPf_f4)Bp?+0!K0B?I6|ZQQ4r>q93LL28u;D0u&fwHU=dah%<-g z3=4hA4g&!*rNz6#!N@`zrV}k1GRNVXTGN%>)tN`sqs6r?&mGDIV3YT8lNT>ej>u9q{i!BYOXVGJPh@oX%$zS2J zuB3&QBV!Y$99K&`(V{;iz(F9wrYHiNtoV#!7;hMG%>!BC()5`}(9A`U!hC|pVqQGu zN!RaPwT@X5Tg{Ex{SkN9-Ld9tsf$ecd^Z!C7?7ri9ltwmjA_t&H&5@oEqd$zGj!+z z?oW?z=bkdq(#fYRZ9(#=gsauf zih5O=rM|toWwmu*tu^n2W14J{Wz`d{uCO*un&8&ChaP^%=F;DXqklLG1R*5?P6UJq z6cO|>zT4weH`Qq5d?KnvYuEcUZwSf}yaG~XbLoHs4hZvHyAmlPcbOVU+nbD`aKgOD zJfsbdtPHa;z*>>b8sDblR!zsq@csTU>pOTYu~!?#?lz z-K(~#TFvpjX7hluP%h?UHe;LZSVu!UbyyE&a|Fews3QP4QPyaR7vT(wYbf%TGM^$(3R~t&qv*xzx_Bj%GzJ+>$baMu=~SzUvQ6~ z@o!t0SE8cFrL8-={xx_l}+5CjQ+lRoCg_ zxk~qJI%7=+UL?4THrx8TLGIUM|K@hMZ9TWv?kkj2{out<+{0)5-OazGS$wLc&DF?Vca>!oMilNY?2MXu?7%Afdu6h;6l$-kKi z#x#-HBL4wggwk<64&`9hrN}Kr6Oae%Sm*%^VeJM;61iP1PIQo)vDO2;ge*ad(UoYR z=!cw%wF~l;R>xWAL{xh83Dy>>|D52t-l8Mxc$6Pd4eRi9od)lKTmyh1?*ZHZFp`W= zv;a1dFTw$o%W}|SEr>%QK3;>tj{?BM+8b-mYNuk*UMOl~Ed@*N>|u}>9Q6YDi#)O} zq<_Y3iu|AYM-GXCQY*E(r)OlH0D}PF061b@7oVP@@W|OI7l1-!LhKKa4o48f z7XZ*i=8f!uusV)e=|G2iMy5@fSnH~9P`A`EJQnSQd^fhH9@i&w+yMrtqHh5jM7B=7 z;-Ckx3Ob~7lqp(?2ZZ;77b70vDL|U+AHd_0_XDh*Xlv4fhg04dSEA25+L1Ae+yyzU z>L1V*;|v)LWk7BSp8;*!g0_Q?q+EcY0P5jb+f(<1L1spIhy!pDI^>Z$jFo&s#47*d zdd~_<{*jGKX;WdU#Zx$}4^p>MqLe><2jHJhL7=~rcful*gg#>&c{yW+{z|>#;G41f zPTwXR;54o%+Ohw^oWh>fNSoh~6$46;K?sh>#aIRN9QB8zy$OqP2g<5>0s0(UOh2H# z=#S)?{*3G%P@5cb0_5`aAKFHsaM~L{BNk8DJHi3fLvf#W=bNVfKpq&kI#EWn;pyo^ zlo7|3euAumeJ;{uZ0hJ}EC8}b;#8?&!-j?W6fLfdUD9FR0C0yBO+iYu-ZH)3%PsGZn(&mr>xF;# z<369_*BX0B>0Q)U(zHyd;f?7o(@5X*eEihu<=;LNp7h2>djpJmV~|5UW8vDw-~9Vm zm*XsJc}?jK88RgFe-jv$XjI4}ag8DOrm^Xbfi2UjrT$Fg(KCijKvh#Zq-Bh@H&CrN z)BoOtFy-{pGUPoQgC5F8x+XBnOqVoF0N_o)L|0!9+Q4wbX^^ z8Mw*&>0|tEqnC=}m)daPz~0_~d;U*PZ00xJbhw2(GK4jdUOBAjthe5L;hmp;nBg_w zvOGQSP1Cm=uAV%JW<6K>tqFL-&x96X{xBhjXI`<518+i@a$f48+tFc7KosV4^3J@j zp9xF60WrOS?n0lX9Qsin6JAy=kAX_6hT)rI$MSjGXz`3crIB|vZQn4DE1!8+=6wSK zR87Al>-PuOt{eK~vwz#t|8D$n|L{X?fB5_9NB9}j4)wqO)h2fTHTxarCr{enKlD4^ z9Uh+j^h3k_yYKAb@4aUu|EmW!_T0bz+Gs!ix5EPMM;_kPKlqzr;T>_a(o6lGczmSM z9TR?!KC-FR*UWD@bz_@8-j4C`CQ$aqq}9~!?X6Q}jL=wXt={%+S;TL-<)YqX!JXQc znlB7o75XRnVV-Mm+4ABuk1_`H-WUFz)BYwX_RQP1iQ;YJDXbq^FIB34)?|-9`e?=Z zM*T^2Y*V?sPX5I`_t^NK=1+{)Hmq|_u-{9DR9}DHb-fAey;*YbX6Ym31zv>pC-l}h z;J~oH+5L_?egE#={HB*&8lL;xnoQ|cy2kyQwJlEJ(|pUn(Q6((+lcT69P_rx5z^+W z@->>)tcpk84a8Zl!&>v9x?!HRO{co!XQg8RY;PMbZD`r%l866G<0ucYKWi5Q*_6X( zg^9zr_r9a)w>N)9{N~KXVWR24f3>#LJWF-=yZ>gWAN#^czwp4ybN!kJ_VFi7*})G# zZ`Ja6s5PA;+{}Js8|&USYRh%F!M9cl^-A3i*rhsek=CW=4CFhu*C+qp0?p)u}-O-;r<%p1e-z^sQ19n~9uRLZ6zv#NXTg4;p4!5rt z!Y#IbbLQu?9JAx|f~(CF>g4#(w+~m^kl%h!4)fdIILL2z(>ngxNu$c|*89c^zW>gP zhk9OVx8xhtvi5JJ9>%xGAma54GQ0QR`M@u*>OB62sTcZFF8ZcldG{qlUJOiFYTtIG zSx-KZQ5l%0+^5uM>!DF=Sme6!&;~Z~tm$fvr*LK=-5bEen__P{F7hj!S=sOgWGVYA zPOVq?asyfg-csw?TH_*{RoH6vr}qZJ3He55VW6jSe9@`(YF6L^9kW81O9T110qwj2 zgMz%yz=LyVf&+oi`AXH*1OB6hX^rnlhv}K&3ZDTu(dIl79m) zdjsisQ{*UzCojqmY2XY<)+Rru)b0&<*h+S zIOI5b&y{g&U?y)z1BZSR%h?T#RSru(f^WbXc&Y3U4c4~?;`7Lr8FvOm4ev=`c_1A7 z64W)$K$hMBRjueCpTZf?&6{$QH?WmAk}sRsk} zwW7z`-jtYvOhtL4Uqe@QU|=xM8Xfrq&VXTo7T-5ywF+mzwU7@}2J`$3xEJ_W`Z?($ zS1^E^ 
z{=l9Ydm`}Z${TG%UtMy^CCfCSO@G6Y52a-QR*(FYycp=S>~EmBAcr!miXjj5CFJmw zm%gI-T#;WGfH>rhXY~OCM239uZHU7&=8!R%a$&3Sp)lkF$5=7Nv{rG|Zj3SZDGZbz zWFqV>C_d$6uaRek*FEC~XMox8UU_Bi!LetFjKGX=gy;C5=oi+7N}in=KJK+y*} zvJbG1=t%kn?Kz|W#^35n_h=SWbVY}lGde_Px1+!fot3UZ6HR0yHr1Y@BHWDc(ebIU-L8VC)Fba8I~ZR$0ZHMp|GZ;ylA_u<&Tqjkul~p=p|0p^ONtvgj>i z!oJO}xUy9KDuB3n<2EZV-K`!4++$Pwu)jU)$85QU|J2&_ic2mDY4lilVZX@QgZx5k zt`&a0`Yz|q0$_;u^$$KYVf=cn+Jbh&nZ|D`|E%=!Lnr~mkI*cuyjk%W7i?6}c3JPU z{Ag)8aVm|;c7_(umaAsU(2+WMm^Uo;2!apvdy&5 zCy)03eXF$TOj^%9_ojdTxvc`osDJ<7RzK_cb=O_TuIu`}zG`J{0U+JlJwUux!jnf&=^c}_~SLU~@AMZ>PGUVH|nS;=axt$+ECWJ%r zMngUM=6iskyHcD++XmpE@|kxf?*LWVw8$zue$)?Fgk+WS*~q*3=IiGF@9qskpByn_ z;fnO1dg|W>Hub(=ztUVvgS_&}M+T}lP+jCc`)r0k@4RaaP~Okz&Ev<7JHy(qB|ND1-_}{D?S|=^YxcIeZe9cW)K=zN@w^_t@>(RPS z^Xg9Rn)g2S2hbq^j^3F?A3tp4VZNK4in!ldd-^|p{)iv)&HjF?YX|yoyuXbHsHw0W zea!;UcSM?euTeY(go13Mku*|y70S70buFxOStB+=lQ{6(0HDmGb4@hV2aw5{b*!4= zkdER3n8N`)Lw46lzC{brNHt|dKwc<2BJT#wg#%Or&#bhO9|A6>ee&`I@COHvt$18% zTikd9sm7p~I81s^z2Sxq9U5p+9+io(fR@aDbWPZpsAUka3{82B=B-sRznKy~wdYK^%TDK#sbl zAK(ZBfD+z|xU@6!ZTb@Rt-P^EMBCty570kwF%S+VcpQqa#A95MmRaZv;{(}%=y9c= zB5MU~&D^bbX1OoWf!~)Od>H+LJv;g#vR39*oLRXmha)`m9!~oe>i>*O3^x!y9)HxY zSvCyvRizhS*#k9;hppuP8k4#bo;)J2V9t!c7vzP$D4L2x8E7*ApJp|&{GKsKdn2C) zFiw4^4qBoky2Z7&HC9niMLiXLpy&gQ^Z|Hyfa;CJE#k~QeJ=f8=RFs|JzK~W1EcwM zzycjcb9Sv;7VR()FjjVySJ)Us!v&^QWg-j=D;t$C5fc0`*kS-03vJvhue=iC0_?*Y z5_Axji5^z~ZZJo&UuWI3poj5_t=RzlpnNbCXt}`*kRQNOFlxk+_8Zz#vgSrTp+Sg6 z3@on2WdJ0ClW-2^4=ZVaBB2|HZS)?&87|hotd|zrSvWJD-;_K-OQ5%k%#+n=rMy;* z+cKZUz`c_VxQBb_*^-q!0DC=*86I-d$z^b#Kz_%3JjsunG}$jccqWl}Z<^lI!dv#O*?Ev6`Hcn~y>3iC{J%ZoFiIc3HU--`^ zn`UzhaPRiphLpo9PP4AGC&ESM4g{wN8E^TuaNXXZkM=z&ia}^`~nrY_B(e*IrYywnv}b zepF&rqGdi4O}>{n%@g6FSrqub~`JtM-3Pm-}Om?jQ1S+NrD9x3Zh>)vKHT&9Ch{ z)iR#neP_q={iKg~2>?FPSZ0|X;oUoL@8Dl}erpqAR{qW?URS!tbxXeJe_bhmb7Nl8 zHleTgGq8GAU7)-I(3y6VV}k|D99Xc3;~+3+ZbW#^T8wvC2|mn#ds?p?ZU8*)0pr=h z;bD9chw+65KAU-VbX17D#{LI{up6Cqc6c6f(Z#`1-#TV(wztKN)2`>9@k|${!xR=EksZ*2^e zk;ldJKpY>?B(fj&K8Oz=rZqA06M+f=Is!m~F5n@Q>j0dhsE7g!ZAp5hg#&a%nPNpy zz(#=OII#c+ze7J({ZLNcA>YB#&twdNbZHyFgIJT}hoc{mXU3)a8sAg^f9%1)Pf^b} z?4{6mFp|JYG&lfa)D7haY{yk|6@7>{q`s&( ze$0>b5ymV1oHVIRept?vLIHAA`aZHWWQN3v`w`Yiaionbm9WfJ(8U@Na#`k2l#A&* z^c(ni_L%7F+AoXC%)Ef346@RRY#s-V*s3JuqCYWTKo_|*{g3=0ClFmM;Nk2D<>06B zNcs5AB6pQ+>Z$-ToeW2(A_4WbH|MkWz!p}W@E&f(lx-Wox zwvZ|YM)T`nb_MRSP-k(x0@(TE<+6EWpfeC>DiM1EW`-~YgLVYNYlnKxij!jD#7=hAYDA0t`kpg*|moQ^6 z$!z4d2l$~p>B>eh+EaRN^omw;J({bP(t%k-;7}<{-aCXF)wDR%xG8Y&Rck+OcE?wM zddGb7Y53v4QgZYGbYC`2FkC9IO4HOc0!N%oz01Ga6;KO~epM|jW0Wz6<{bi`YT@(V zlU~dWXN8kCB1bJ`y-)o#&JZZzYSs2l>7>tjz`Ym$VH?E;+*xwT(k2@H3}Ge0h8Z)a z`Y%2k?caKH`*PmbUw^(bxThn4d!)@Ai2wm7VO^|cnU<}Y&x{rFz?jny`pgT;4|!6) zq&eN__-t?9$zwH(d+kl58S&YWNb4`INBX_$yB;9P<(I8p?vK@gdjRuDlQxbWLSeoo zd`|($;W)~I@VL`YKfT=lsSDoO_pGQy?vh8#2@qFwpA^vahxk4u+1)Cqf}23ATTf8!oSq-*-StD z@WWg6FRZq4q$+iK^#8W48&J1Uw}t)s`>f#)eQEOm#vJ$YOfV)iSKE7OzrZRzJpi*> z+aZgG?rw@Dd;f9ckWY?Zt(DfC9&WMqdWSlpuKA6)c;*p<^_HD)To%x%7COYs1FfyJ z%TnRE+;dMmzic6pBas8$*?j!67X9qA&%)8591&TIyeX?brRd2!tz(f9HA+uup!7=q z;9apyCjYFf#vL*+KrVn&vchHgDK6`7Tzu4TR`^POqJ`Bfj)}}$+u#on1t-9e;-VCY z<5+NhI4th*jg8Y*c-C3C7__EvyY04HP^Pc64i_EbV4WKocGg(ZX&fk}YyGFVs=M^a zSmh(Vk_L`rNz-+!XtL)3_zAuT08UH;Sv+GFfDZW*7>zKB&pjY3cayLLM*0_Mi0At}$=CdBG2Vcf_zB>|`X7Z&#-bc@d+LTfsP5o7DK~VG^8wfb4A03aIGwIU ze`D;$`yZmwQCCiJ=_uNw%ebRY0Cq$M#h5`ptfOwJ7vxjSA&R3b@-&Px;0Qx|=)@U} zOaSbpPGfK>&(H!~Dmw5|v_1WrzKQHjN8fVnGT~5ydC05-;xL3kKcLSb=gE@K(pQu> zfP~OsF9}&QP8J502l^`DJ!EkB(YJB&Njn1XrG^0Dt!1T_u|d8>7vL*S07H!4FU+_MExS8vSOF@J#4V#{r_OBvY2j~xtloOiUcFz=2Z 
zc@|;N@7Y=gdJs0EQU3w1GN&gk^l;3@c^_<`#_A^W2B;Zn=uXHAfQ zH+o3EaJs?XklxkI3@D8rI1QY~I%MxYz2&~Y<-TWMIW;^05B>l3mvhBUE0;}vG*p%@ zsFKBBD$x0%CUXD6f6G^moOGVhU)ESOKtzpWZnB@o%I(`q7NhX9rcqbYAkH4zx7<()m@g<>E0?Hdo`Usk-k2)v-jX(i1bP9%wH8TCF8(jMj}Z zUG=n+N192KiHl{w9lJ_}TE%wy$!)Smxh%d*E&DZFL;CHzRLXQkA0Q}ehpgLB$N15g z)#-GI67v~@MG1rwz{+qTgB4-ecpoJYB~Y3KSR2K_5aYi{Q34UT7wbSl2{4geRf);A zO^zr76Lfn9L;-Nmp0PG_<&{_Fi)!vk9{B*l4PgLg6(R=H>;h$SZHgAs0OZ0X9%ZIk zt7B0WU& zf#NR&d|DWeYv5Q1u7JBNj`?#nH;$%8p@!m(BPPr}3|rTG>L$v5EEsZBVlkfPkK&Cs z!9Mp13@tJmLr&@{Vk2Z##(B}{bg)&4Pqt2jYMsWon+7$hT(Fg@1V%3tc zS;TumX-mC=2mKD;Z|Zw}D5<1_f4C3?pb$kpM0%6~1vH8`;?gGghVk7qO4c274`oLf z`oNHQf9x)Y>^n_bNlUBLvl#%$@3>v|QQ%%1-R(b1`3=|KAM_~M2L^5`S1Tx#`c3-< zc#g6h5IEmxBmAjPr|3e~T&+vg)qC}wKuz^M4La_43tH=1rR}GTx~MYXYI|h$U?Vw0 zt>*n!t>?YFc!_+|)Jb#A*Oh8*Z;vfNAEiQh=evw_O4vM^j{}DuD};vdPoUfBsyLrFvb?Jx3LoN|PqdmCLV~;_C~*HTpx|C5sM8f{$iL z<=;&H{FghtH(lT7pMT?ldzWAFu6~D?B^Hi_*pPrVP5N5<)@Id#FkR{c;Q#`XH~KH$ zZI*Qat7CvMSxg2DM!8t5U~CnV@#b3y(RX0+fPRd6#o?laKTp&?dAo~#>+e}SpdHQCw(<)d3(eQXq-NSbLEmcGmIQTc ziS{Y4YG2-G^o~;5xaNGoLm| z^W?ZMmtx9a<-;NkP1!b>F-K!Q3$Vq;38ZJo?uBWu$ou+^ zJ;He5eO>b!#trX7K5P=fIv8`5JAab1#`E>``Lgq_yU920KnCOp3uBCz8T+F5F```o z%3?7eVlHfH0Z;=h#Qcpp5r7EZWrxk0$qp9}P^;yOq}#AHy!9XAKBd6jPo9~n$1RuZ zh7%pbY5OgO*cgMpsy=|WQuq_b%7lK1u|MOU;p~oUiV=e@Q^nreISQI8+OrcQ9sbn zl|tw;`u6Qx3kb-(f}as#nE%^TU#Mzz)gBAT4ZIQJh^v|?u zzxJqO&MLLUJn!rI@|#nBD;G9BMi$Egxvi#a>B{SOWERbdGfYrrCG~mn_6omT;Rc}IuHPzlN;mS25?Gxjz`7peQs#Dr)|S5yGT<8%t^s`?^0d0;Ap-YSw>R2=YG4gPD645Az4y#%@q3E1xKLsH?O$z%dp#$=lq?0}Aj`KH{X0ya7{L{3Ks{Hh*@j zj!>`=?>YpuSUBnf$|V$57RUVTN?EDnfICr2U>Ptgti>tyin7Z9Q^k0iKRZlqXCo8Z zVgvw*@{tzSFIeVx001Z3NklP|Sy0SG#|6BL=5f&_4oaL$oesr5 zlUn*?fJi7+A(ZX(^8gTWN5Rg(mpV#aLdKK{1u^$ogaQz-4DA)b9PJ*Wj!=e>ct&YU zzS$|BJpMRMPGQ>k^X&?WUF$3!5RQAZ%t{!Z0ZQ0+2e-5|a1Yb5aDYgSsM~}=J}fpF zV3Rsz3l;#Mp!xc{fBY-*PgU%XZrMUk(f7E%0*Tp;j&cDDw+7nheWLWj4L~eJU8R0Q zfJ~Qan6Fg9xID)7waY|59c7)SvDvmt6R3DgR=Y^!wc<(o|)5>%v9y zjfOk1S#FW-hKqh9znwZ=&ilumQg-Zdg1Yg%t%u1p%@?%Rpv|POT0413fqPxI8tUCj z{d3;=@2lm7=30--<+9%o(uLId^3Qu_`sgfD(bq%C_oCU9mtOutUVDAn@pap8-&WTB zk!nemesR{@s(enbm(+Lo(N6*>D&ty8DS@4He*ZHGk*0QW3bgk!ZJiU%Un z)_Q24z-9&7tpWGK^sJ8IMxV-$#T@!W(x4tB>vxzA_p~?41bCc<5&BKWRz~^sG!3n= zviaDP6zI^|d%vfE?Im+_U~dc0^~LjZZI=5MI@VaE8xvZsy^f4H?|fM>TX&gjy9M0& z`AaX#EL}*?Q*|2o=jxkpRaO-(T6%zB=!aYYtS-<6xTCb+zy6&6lM|=SkpHwED{F5) zSdY}Lw!Wi$XX?fXeob}LNkx+;A)EZYy(>0kKsIjXy#c07lcl`_!iV=fM~V_yQ4-*L zkv67XQRzj#Q39)+1oU(M+_{rvsaogV=??>?vdOFR#?{NlJsO7 zVr~}_x&{Lc;coAM`?sEXM-JTQKza4USENx#+SX86@Wr>%wr5Le-6gkC?DaoSkr%Ie z--9}5ygc6@olD$Oh~rc_HH7gd$(L zJgmf(A}`8HdqrQ3z9EayWib8^>sm4_{;i$&J6zqn#s@(%S|oZ;5JV>-x*#NaqIV-f zbfOz&B7{U3iB9wuy?28UMv30bjOg8%L59IN^PclPf5G?N&vmW+>$>*7_S$Rj=e|Gp zwgMof@GC)Jbo+R+n%W{j>I*f8{M=s3Z@2#VdmOG2hvKpEoL3APPri(O($`BX`}ihs zyCf(G@C@?zW{hr(2dghZythL)?ga{@l^A2kJz7%*E8D1BNax%?B^%@4QFW-1zoC4a zOo3b02ok?-IQGH7uX6hr|J_=ZsGk=Fm*qq`k|1y9AP1bxsv6FnM_J*Iqgp2(GbFmW zhCS3|FH)qxe-#l}sxOAs$kw49&%JLCtk^+Klr`D?d03#4e^^qk%@73AP`n#n)~v?p-?*t>u<~1v-^Qs${LrHNi4Fds>B~Io24Rn0bkc>~)R(IC`5YE%4v=Dy#WH}<-tLBiyq*OK#{tSf|3P@5 z^)_gyI2Pasm-3j1o~0KcQdZwKz5nc%+O0?5Z=`0ARd&~?_IPuH>uq@c!8EjS<|lM9s3AQ({X#_ ztS>26M1eWq3)q=+Z>u>`H{YnO=3mwSLGp(O)vk10*X(4n9@(pcyaiXDFn($KZatC~ z)Gs<{vcRuQzqck7Td-7f`6BBr{Dg`>nCR0_K0^s}7Rwc){unIqEp;_;P?JFzaLxM( zA4AHqPVPaMJY|NHE6@D+=KS}&x(;%#+n>$-9!`>vb(ltw3o(Vqt|atHu5%&{q%i99 zD&3jbFB>YjFSoywhvA^AhGywEIwIx2VfkOFs)#%MsUA~c7C9Me)fh3|{pV| zyrU_R_=mrQ2uyrK{e4^>6U1tUI&h6-ndc`T4`3UTG4~?wvGgw6@*M;D6lXMl8be4L ztuOug&jeDM-33PTN*X#Yk4G;%QKHtK9kx9vC#unlysE;(V;J$TUvlb1P+p?Ci08o= z3eyZ(b$Q&*j`>N*>_kDk^p#6QsoM3`A-F%?yfbU+)||04g5Mq& 
zlBM5ZIO2k&ROQ9kP)XOeYwJGruZ?Vkv(q)ts>6l#6tLx#xanIPibW(}UFl&AgUP^MJpf>&Mr$zRW{M>yE_|91*UaiV zrp9rJ^}}?>)YWHOl)L3=3k%Ow(kNErb2;kzm6s!fcQOE+8MUqYa=fBvtT&TT)H4)6 zwuJQG(1wUl8*QmfbV(FR}gJY>{7l*^p?gloY zFOuMnFC{F-Z=aFGW&m*2D*3%wPsOg7OO_X!T5c~ zc!&IK9QbdD+25S#B`LQFZ^d1N zpR*tsA+BcK;`ePSq_@hx&ruRazr$z~AkO#zoznU<_#2vD!$Sq=6pzJAsb+2ClD|Ch zS3So~daNTlwe@~k;x$)LFp8RC4eTH?F++s-nm_^ z6*~?NTPWQdmNgt)f-(%eY6vd)n|kc3*#Zh3a8M+?WalS^f>OGR6UEvYeZZz2>TtWY z@mH(0;&C!BLX^Qrj@s|z9t8gK6mYU&+C*-D!yJ>HR^#3ynGhtbMf3G9te%qJctSB| zoleb^$qPz_D?4GjsMy($Q(1g}!Jl9EeAGem9%nwf%HLqlVYaP##GdCxHaA>U^GKNk z)X$kw|2F&gkgAq_Uk{R*6)WUM@R5rW#5e<&GfcQiIZ`LgLAB0#e<1{^8{Z zb>a-{@7?4{)d=APP#H(P=}wiSr(s>U#J3m4*Z{dM&osghyCDhfjy`ln8jmu0O^|}E zbUq18OkH;#p0X?44APAbrLy7<;+m*)Uk)w=c^@gcl)=lt0L?W_jPggIrgNw&c z86NPpC1!Kcj9#e~Y3Yu3s|Ql5kIig<7<|YuWRr}~z6?UwU}Dr~1N=JnaSE$#C}^(H zthb(`9+?%akqtiZF$l}gS=I$>#N=Z_Cn~j}9}lMWLKQbyT28{3heSu@ zwJKXSh^w0Ji7gvM*A!>Ue!zD!iFe4ap1QqRF^MU71}~a;S7D!w>$ADel?S8M5M8GC zypa|>rbX7{K36#!>-{PI@!9h2EtlS%Y|Y*SP1CPM?u@3wADqsW|2&E{&#D>xEaI}> z8T1_T_>}PX`X#ZF<*me^`ztgiDm%Q6aWC)ebd_)k+*Rj8V@VR1I#ZtOLeOTX))Ths8d zk$w2&3qQPBlDY0vU)}eTx=@CpHVfHxExw~T3EJ7EmeN<RiT z5pm({n39hO{)PoEc$FqTOb=y(N)-UziW35gKUd~?`A+O@6hV-`gD3wQe%(d}tWgyY zzU4&Xw~38nAJJ8B!lo7y4SLNUjEape3HJAv+#lc3wJaXggJ0B(JYbM&U{QxLBL2yusMupo3bEYTtMbmP#U1c50`T_ALJ3mww zOMKgdW$&Sc6r0Ox$G1`GC1MwWMZZ) z==6O({ZjI1iirD^98GDv-&EMdIJfxshe;u@hT2!NJ?+z{9L!(CtfoBu@mDDKlQhRx z?-U_lM-l7Y#wS?$(KgGy=lv!XXXhBf?WwTW%-j+90WfJ_Ym-eF0)g8tJAAZZankhm zMTMA^dh9#YZd3CCBo@*1*^gp}ad7f&a{9qV8Ua||&#kDJ@jWPA&S}Kb|Evuqyq5N7 z*=*5DVl*;qIs3_AOCh*r-e$jOIRVINataCL_=o;*ad$KR!|dO~Oj)C5Cj);!Nyq)X zH7F){PGu*R^GfVBz15c^U!p+CO0jD@6PM+nH}O!tFlCzrdOgOU!9hR~zg8k{NlT{X z34N_Zypb4Q=kJ!&%CHXOyI^^%vC}I~dusx|=I}^*+59i^1SW;Dx|Dk(f3Q%C3Of8V zyX*;8!ZpkNogBqn`khTuRk{(nadm(1ZQu@Sk1V07XB{qv3LKl~a|ET@3e+NSC^>63 zS}P3IxYt&>h~Wdb(WcnsqGhLVwl&~90#3vqFLl=g5gC>gkpi1=tYaW9$L=!!y0)=X zc1T5<;k+hMBx|}%q-v!0ll?_#BM~cO`45*(@GQD3N3ykTlJQ`lfKHJnvyDwU0^`Aw zsLJFQuNG;&dHSv*4rX8(os}{LEzr8jW%7c)f!|S>C-^${C9mwwP@EKxlDTTODiYi)HiLw!M)yL|lGf0QMi2aS=k$a)V`KXqDC^OasfSKNkq{n^s*&F^-ACZq^G|Iqc%3XxU(pknelTKe z&&MEYGFFPW7<6tjIypN|501tPQDg|QE%twbI(V4 ziH^c&DMFgy+Q<>3`iI_Aj18`4EFK#vM#zK4#gWnZx=YqU#G3)OyD-&~;Iy!ZbE8oj z8fMPzVr;EuCwGAQB__dZk?v%*j%}}$grLp2QEFj53dek;Z>Bzf;0*G-84`;IRy~mP zdDZ%f@3A%$@^D&vHw1@M}voS&21xxV>?(15y`5D9C!w;PM%`56Je8$+#I=qyZ;dSPl$M{Y7w9i{- zqS2_Q(!iP-yYqYOyY@^_G3U&&9d*-ULP`Z|7^%j&OywyEDC7h5Iq@3Tk$}(W6slp} zE$MlzKKQ5VwvQeTv1m$d-x#Y~gl>IYy|O|xhfJy;etFhLfKPSoNFl|V{JQ3#_)lJ3 z^<%+mV>(8nEeZ3=HX)zP_)`qiVsK1e*w`-^V zezXmoweSyVvc;=#va>(%mc7ZYgyWszp!>4#li!4r8?W1?9q%}(cgePprm*+j*I2wj zt%w^m4u#Rs1cVA+3%_BTDt9d0eUV(j_tDGbL}7VzO=sl?a@=dXz;(Z?pTJ2KMjJO?< z1E&k{r+`1!j`}3c)m|VsO6D_(@S(mPg}%8Kh27+|CbA{^wM%FkO=kKx1pJt0DK_qL zhETn11jLavzgGc3qG;REB=Ph`ud&W<4`_5Hq}R*kR9SCyJqrbx=lDd1)nF_*nlaW~ zn7gfvtx=sE1H1Iv&f?YU0aN63JOCr$dz&=v5+ivU*?XLkgFShVxy0!VdNGt;)SBUs zDSGoO_TqGl+msB>^PhwR(PBysz^$n}7K*O9@65j2nE>c!;2{9*rJW%hsvJxSd(q*4$o0GQt)(4@6)K85rgoo{6?(4643Ac@hKhf2`f za$izhmCGj%AX%{|f&fWjQNQWT^*B6p*n;@yLzPSPB>Swj`d@|G_L(hB_N`2A|7a;4 zR^Tzja<9iiI9;ZJA!BP;g=v|8_qP0b-&B;S7nxxEsfCfdXqMuz0<3=UAmG5a5;j<= zvYdjZ+@q>}wo*Jp^8HZzG_cy!X_i6BT$&~w_!I2N&lR&Q&Kb5Emn}XSC4CR^Nru_gm2uycEPn``L-hKsZcd|0_A05FQv%Ny-%eB9QI$?9ZQyn{W z8}{t6*1W19)S@ z4ROb)-4A7b?RAersg-1rF@@@GWCwIWJ_;2pHMDv&$9R_~L|sCD)Y)u`1ci2_gybzr zDHKFDJ#tmO!7O~b_|x=hMa`3j+bcuG#&?$*I$Or9exM|RO-_jpBLfdA*UHxvvv>Dq z296om1F=dgYF0i6JANY?3@W|riq4OQNS5mAHDMTpb_e%@W5`9utI60E~F>hZ##PqhSS1g{(FhN*1i9n%H`6GVp$+xHwYXUt3_R)r^ zQV|PM9Gi~J$#``x-skco))GK5M`!N5z)+dzmX@OTz*F?C62lhX_54y(_L|~2w$QgL 
z6maLV&4^1VSojiS$?T|S%ta^vZLN7@C4*rWDZnaB6i^sB@b0yH&J5)q9p8Fyk>lnb z*>~X8XM{zEFdO7<29~8u@Wwuo`uo%*NX|&m8X2pD zWuUgI%8L6M_CUE=fP?%XZif%Cm=y z^jCAZV$vy0)G=&l@>j?}iqYXOJ^vHdr*o9uqaBfax|}7gchMOt56z{2H+cr`X-&G* zD~4StN&b3lE$80}zxvQlSqlLN4H7*YHt;9a7x7TEyIeR`9ON0~nR#zYZpR+&n{#hh z8UMj*{Q)u72QYV=*ai_$O(H~*gANfc)A^lnuId%hz!s_bBfi`Y{_cjnl4 z)@qBFp*dkN5`Q(-LjB15v7!Ft+lch*XsMza0rClgH3((-XVey8GGN z{l38z2^P;GZSyAr%<#Ht@f`r40LSjyLR&FvmHOh<+`+=^%7$h;)z$AYl)Y9HDtQO4 z&vey;eifxyl%N3f#OX6@(()7v#Er(*`npGo`T!Mv<;isi`J$ zPFOst6QEptuom;&lmts0EjgBt7Bcn&cqT4FJPU@}in5D3n6vFnaNf!?iL;?cy{)Wj z$bCd2&^2F}FR(j9mf)+fnl-(CgfIAd%I&sDt7*p14SB90^`!0#f9E}%^{OMfkKODk z+3g#bcJH2B2Rfb{m`Uh#NGdM}m1|eX$8(%^pE;r_5DD!>4^)!F4(Gje@qe_e6TDPH z&9FfQrITwgd8VBiUL*IT`A#356Ic>y*8z{sg?i3yo`Xlo11a`Ya?MCvp^0X__u*I6 z3cua*8HSE5EQEEt)?T=2Xq~WU6?B!X73-KhvvhL_rl?=C5eP+1)9|%XPv;oDK__A& zW_6pEHUm$Uyj-1eO0V|rC8ZmiXHm4A?&L31G?Zd5y}3^^L|W(K4NSBA-EC0#QxhdW zBQ4+A&}mX!dlM@Guvhdl>`&H4uTXbw0LcBc|YTt-fTWiYdZ zftjL^euiQ}h{EktFS9cX36GN={& z%RmYZm(#H|%&KMcdaMaK(zYiqs0&>=9o1>|^NxZ^{#>q-d{-c6|Fh{Z&q;ELT^bor z(eQcp$23Qg~ygUgOPKn@v7bra9i$g(iC2C#}vb6d7gl1;a;JHgqq@^yfs} zGY?YS67P+e)OBLIlsB7-z_0NM$%jR2@07=5_3X;c+FgBbeaIpilk%3^O!xiNMnFc& zDkRtIdI8bokL+iffRS1_qsAd3EOlVTLaQZE-(&9C9?oBtRdy%FQceOwoO1(0XRpKB zLCB%h+65+@du%&O-Jj9u?7vKpweTILIVJ0Meq*)vait==Ua*yHF(+E_MrK2#5~}?; zI31!eTroz^6|?iZwi^T!l^GcU4i#zi`+&_>#tev~e`Db6?8dpn@pP2OJ6$qsK&sAKc^BM@ME7T`9A z8l*AH`m&Q{`E8QkVDxKb1#9ky$GG}k?rR-fSPbY~F|Z%Nc}>acQCzc=48~AlU+v=%Hk;(V zkLH*_0(uh(6v4JFn=my;S#G<0v1^06DO6ObHJOV3v+5FPK`0IOYLst{3jw(d;4@xW zkg&zh>`Kg$Kp5_zSnVWerb(NdBzA$x?fMgs49!)es82WE+_8z~(|F(@U}?&v*GIZ! z+jjjebimQiFDq!bq#=N(haOB;?*OT{10~?ww^X=_-vr1h-c>f9s2Y>(suGGfkl+}k z*G}g{OlHa4m!dLEp&U|^ISAqHi5yK$imHyFZ(W2DnX5d@JCB^jUtNF|*|4`;MMo7V ziG-)4$dj30Hm2NZO7xixdjgs8qv9L-RNHgY z(`!QMU>@TBi?xI+u3juR11(y_L%kM(-Dlybgv;j@P?g+%{@{xTmh2rxpeN@;Q(@N< z69Z$uz>lN++FBba3?lL>Plh{1+{u`zk5hcFOF7QX8IfinGas=*ph2k|975tiCd590 zF?FkEZ$4_%7S293rtpiz^hgi$3tKXHqJlQud|HqsdAp4Ux&o=|b*BD+s2MD!r~PcU zi{;zk$8SfMnp%&fE0<|CW&rQgmU$W$r0b&}&%9CG4~!u+$>TWy9ZJ@cT?b87<|fnN zQ1boG{>wuQ6y&T6h_B96eyuD5M8+ zUVR=aJ|!C>U((v8+i#wUVa(jeqodjoLFB#|RE&Qgl++85av4A-)c7ZZ)Ed>-Er${^ z)7wsvxks!dm)EjP>+*QJO@k)6aEsp%3b_0y`kE@4QEF&mfHrbM=UDO&$2$bJ#D1oS z5V9o02Yyp&eEY!4(@l=cetch2g!WGF+<`#YkjdpDWonQh2{>G zqMp!tqA$72B~|#;z#=z8afMiRVRw6qDz^9Lj8;#@|8UIg@8%`gdpii&UoBtbP4qp~ z((cvuGbM|+YmoPm&5Q{Lj*7?UvdW4}WktFBjmnd;5OTnU!SHZVEG_W(POHMLpQNaN;>YwQdn(Tk8s_*dVcifqXexXL_ zNdH@9sKrH$wKIyo^KX^ur#sPIuq6KdRKx%9TmFyY@?VXFJSW+I>oVTvt-iy1`#o6j z<(zm4|NDQdZZ5_6{-59d-xzIqH3|2{IIsHktA9En|4#j{AK+iRDP5Y0$~O)x)Ijip g|F;wV_h-D))N0G@{aJb;0O5G?OjoT^#WwtZ08~@*X#fBK literal 0 HcmV?d00001 diff --git a/toolbox/openpcdet/docs/guidelines_of_approaches/bevfusion.md b/toolbox/openpcdet/docs/guidelines_of_approaches/bevfusion.md new file mode 100644 index 000000000..ef3b427f2 --- /dev/null +++ b/toolbox/openpcdet/docs/guidelines_of_approaches/bevfusion.md @@ -0,0 +1,35 @@ + +## Installation + +Please refer to [INSTALL.md](../INSTALL.md) for the installation of `OpenPCDet`. +* We recommend the users to check the version of pillow and use pillow==8.4.0 to avoid bug in bev pooling. + +## Data Preparation +Please refer to [GETTING_STARTED.md](../GETTING_STARTED.md) to process the multi-modal Nuscenes Dataset. + +## Training + +1. 
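+
+For reference, the two stages can be chained in a single script. The following is a minimal sketch rather than part of the official tooling: it assumes the commands are run from the `tools` directory, that 8 GPUs are available, and that `checkpoint_epoch_20.pth` is a placeholder for whatever final checkpoint stage 1 actually writes (the epoch number depends on the schedule in transfusion_lidar.yaml):
+```shell
+#!/usr/bin/env bash
+# Sketch: two-stage BEVFusion training, assumed to be run from the tools/ directory.
+set -e
+NUM_GPUS=8  # adjust to your hardware
+
+# Stage 1: train the lidar-only branch (TransFusion-L).
+bash scripts/dist_train.sh ${NUM_GPUS} --cfg_file cfgs/nuscenes_models/transfusion_lidar.yaml
+
+# Stage 2: train the fused model, initialized from the stage-1 checkpoint.
+# checkpoint_epoch_20.pth is an assumed name; use the last checkpoint written by stage 1.
+LIDAR_CKPT=../output/nuscenes_models/transfusion_lidar/default/ckpt/checkpoint_epoch_20.pth
+bash scripts/dist_train.sh ${NUM_GPUS} --cfg_file cfgs/nuscenes_models/bevfusion.yaml \
+--pretrained_model ${LIDAR_CKPT}
+```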
+
+## Evaluation
+* Test with a pretrained model:
+```shell
+bash scripts/dist_test.sh ${NUM_GPUS} --cfg_file cfgs/nuscenes_models/bevfusion.yaml \
+--ckpt ../output/cfgs/nuscenes_models/bevfusion/default/ckpt/checkpoint_epoch_6.pth
+```
+
+## Performance
+All models are trained with spconv 1.0, but you can directly load them for testing regardless of the spconv version.
+|                                                                           | mATE  | mASE  | mAOE  | mAVE  | mAAE  |  mAP  |  NDS  | download |
+|---------------------------------------------------------------------------|------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:--------:|
+| [TransFusion-L](../../tools/cfgs/nuscenes_models/transfusion_lidar.yaml)  | 27.96 | 25.37 | 29.35 | 27.31 | 18.55 | 64.58 | 69.43 | [model-32M](https://drive.google.com/file/d/1cuZ2qdDnxSwTCsiXWwbqCGF-uoazTXbz/view?usp=share_link)  |
+| [BEVFusion](../../tools/cfgs/nuscenes_models/bevfusion.yaml)              | 28.03 | 25.43 | 30.19 | 26.76 | 18.48 | 67.75 | 70.98 | [model-157M](https://drive.google.com/file/d/1X50b-8immqlqD8VPAUkSKI0Ls-4k37g9/view?usp=share_link) |
diff --git a/toolbox/openpcdet/docs/guidelines_of_approaches/mppnet.md b/toolbox/openpcdet/docs/guidelines_of_approaches/mppnet.md
new file mode 100644
index 000000000..f56b45c6e
--- /dev/null
+++ b/toolbox/openpcdet/docs/guidelines_of_approaches/mppnet.md
@@ -0,0 +1,73 @@
+## NOTE
+**If you want to quickly develop your own model based on MPPNet, our recommended setting is to use mppnet_4frames.yaml, disable the `USE_ROI_AUG` and `USE_TRAJ_AUG` flags in the yaml, and train for 3 epochs. A reference time cost for this setting is about 5 hours using 8 A100 GPUs. After finishing your development, you can get stable gains by using mppnet_16frames.yaml, enabling the `USE_ROI_AUG` and `USE_TRAJ_AUG` flags, and training for 6 epochs.**
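+
+As a concrete illustration of this quick-development setting, the two flags can also be overridden from the command line instead of editing the yaml. This is only a sketch: it assumes 8 GPUs, and `MODEL.ROI_HEAD.USE_ROI_AUG` / `MODEL.ROI_HEAD.USE_TRAJ_AUG` are assumed key paths; check mppnet_4frames.yaml for the flags' actual location before relying on `--set` this way:
+```shell
+# Hypothetical quick-development run: 3 epochs with both augmentations disabled.
+# The ROI_BOXES_PATH settings described under Training below are still required,
+# and the MODEL.ROI_HEAD.* key paths are assumptions to be checked against the yaml.
+bash scripts/dist_train.sh 8 --cfg_file cfgs/waymo_models/mppnet_4frames.yaml --batch_size 2 --epochs 3 \
+--set MODEL.ROI_HEAD.USE_ROI_AUG False MODEL.ROI_HEAD.USE_TRAJ_AUG False
+```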
+
+## Installation
+
+Please refer to [INSTALL.md](docs/INSTALL.md) for the installation of `OpenPCDet`.
+
+## Data Preparation
+Please refer to [GETTING_STARTED.md](docs/GETTING_STARTED.md) to process the Waymo Open Dataset.
+
+## Training
+
+1. Train the RPN model for MPPNet (centerpoint_4frames is employed in the paper)
+```shell
+bash scripts/dist_train.sh ${NUM_GPUS} --cfg_file cfgs/waymo_models/centerpoint_4frames.yaml
+```
+The ckpt will be saved in ../output/waymo_models/centerpoint_4frames/default/ckpt.
+
+2. Save the RPN model's prediction results on the training and val datasets
+```shell
+# training
+bash scripts/dist_test.sh ${NUM_GPUS} --cfg_file cfgs/waymo_models/centerpoint_4frames.yaml \
+--ckpt ../output/waymo_models/centerpoint_4frames/default/ckpt/checkpoint_epoch_36.pth \
+--set DATA_CONFIG.DATA_SPLIT.test train
+# val
+bash scripts/dist_test.sh ${NUM_GPUS} --cfg_file cfgs/waymo_models/centerpoint_4frames.yaml \
+--ckpt ../output/waymo_models/centerpoint_4frames/default/ckpt/checkpoint_epoch_36.pth \
+--set DATA_CONFIG.DATA_SPLIT.test val
+```
+The prediction results of the train and val datasets will be saved in \
+../output/waymo_models/centerpoint_4frames/default/eval/epoch_36/train/default/result.pkl and \
+../output/waymo_models/centerpoint_4frames/default/eval/epoch_36/val/default/result.pkl (a quick sanity check of these files is sketched after this list).
+
+3. Train MPPNet (using mppnet_4frames as an example)
+```shell
+bash scripts/dist_train.sh ${NUM_GPUS} --cfg_file cfgs/waymo_models/mppnet_4frames.yaml --batch_size 2 \
+--set DATA_CONFIG.ROI_BOXES_PATH.train ../output/waymo_models/centerpoint_4frames/default/eval/epoch_36/train/default/result.pkl \
+    DATA_CONFIG.ROI_BOXES_PATH.test ../output/waymo_models/centerpoint_4frames/default/eval/epoch_36/val/default/result.pkl
+```
+When using 16-frame, we can just change the `cfg_file` to mppnet_16frames.yaml; the `DATA_CONFIG.ROI_BOXES_PATH` is the same as for 4-frame.\
+We can also save the paths of the train and val results to ROI_BOXES_PATH in mppnet_4frames.yaml and mppnet_16frames.yaml to avoid using the `--set` flag.\
+For each GPU, BATCH_SIZE should be at least 2. When using 16-frame, the reference GPU memory consumption is 29G with BATCH_SIZE=2.\
+**Note**: Disabling the `USE_ROI_AUG` and `USE_TRAJ_AUG` flags in the config yaml can double the training speed with a performance loss of about 0.4%.
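+
+Before starting step 3, it can help to sanity-check the saved prediction files. The following is a minimal sketch: it assumes the default output path above and that each entry of result.pkl is a per-frame dict with keys such as `name`, `score` and `boxes_lidar` (the usual OpenPCDet prediction format); adjust the path to your own run:
+```shell
+# Sketch: inspect the RPN predictions saved in step 2 (run from the tools/ directory).
+python -c "
+import pickle
+path = '../output/waymo_models/centerpoint_4frames/default/eval/epoch_36/train/default/result.pkl'
+with open(path, 'rb') as f:
+    results = pickle.load(f)
+print('num frames:', len(results))
+print('keys of first frame:', sorted(results[0].keys()))
+print('boxes in first frame:', len(results[0]['boxes_lidar']))
+"
+```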
+
+## Evaluation
+* Test with a pretrained model:
+```shell
+# Single GPU
+python test.py --cfg_file cfgs/waymo_models/mppnet_4frames.yaml --batch_size 1 \
+--ckpt ../output/waymo_models/mppnet_4frames/default/ckpt/checkpoint_epoch_6.pth
+# Multiple GPUs
+bash scripts/dist_test.sh ${NUM_GPUS} --cfg_file cfgs/waymo_models/mppnet_4frames.yaml --batch_size 1 \
+--ckpt ../output/waymo_models/mppnet_4frames/default/ckpt/checkpoint_epoch_6.pth
+```
+To avoid OOM, set BATCH_SIZE=1.
+
+* Test with a memory bank to improve efficiency:
+```shell
+# Currently, only 1 GPU with batch_size 1 is supported
+python test.py --cfg_file cfgs/waymo_models/mppnet_e2e_memorybank_inference.yaml --batch_size 1 \
+--ckpt ../output/waymo_models/mppnet_4frames/default/ckpt/checkpoint_epoch_6.pth \
+--pretrained_model ../output/waymo_models/centerpoint_4frames/default/ckpt/checkpoint_epoch_36.pth
+```
+The default parameters in mppnet_e2e_memorybank_inference.yaml are for 4-frame; just change them to the settings in mppnet_16frames.yaml when using 16-frame.
+
+## Performance
+| Model | Vec_L1 | Vec_L2 | Ped_L1 | Ped_L2 | Cyc_L1 | Cyc_L2 |
+|:---------------------------------------------:|:----------:|:-------:|:-------:|:-------:|:-------:|:-------:|
+| [centerpoint_4frames](../../tools/cfgs/waymo_models/centerpoint_4frames.yaml) | 76.71/76.17 | 69.13/68.63 | 78.88/75.55 | 71.73/68.61 | 73.73/72.96 | 71.63/70.89 |
+| [mppnet_4frames](../../tools/cfgs/waymo_models/mppnet_4frames.yaml) | 81.54/81.06 | 74.07/73.61 | 84.56/81.94 | 77.20/74.67 | 77.15/76.50 | 75.01/74.38 |
+| [mppnet_16frames](../../tools/cfgs/waymo_models/mppnet_16frames.yaml) | 82.74/82.28 | 75.41/74.96 | 84.69/82.25 | 77.43/75.06 | 77.28/76.66 | 75.13/74.52 |
+
+The reported performance of MPPNet is obtained by training for 6 epochs with the `USE_ROI_AUG` and `USE_TRAJ_AUG` flags enabled.
\ No newline at end of file
diff --git a/toolbox/openpcdet/docs/model_framework.png b/toolbox/openpcdet/docs/model_framework.png
new file mode 100644
index 0000000000000000000000000000000000000000..a827609c2b3d3cdf64d968c3ef510464b826e761
GIT binary patch
literal 104083
(binary image data omitted)
zk6XPScUo}WSBQ=b?DG?hQ!2C?By*-p#&7v9sXvYV3JnX>aJ=s|*;sMJCHE5n+b?JC zFP7bIpXyyx8nvM}1_|oW=zJ|Lfw!p%?-f9PQ#G{w(_Ff16}}yebIHXS-#(>YRXN(d zqSz7FWR@zF28@6|P`qh4RymLQIFoEn28x)j3>QSYsPz@R8Vw%V+|qk1T905`@3o6Q z`d&XXuct84lDI14dL}d_9nn~SPDXWQKCBVt;)qohh;!rJIOqW`hvt+s*U zGZ7gV(Hif&o@YELK<0Zh3rrGpY(l2&yNuPxewX=6CmEEo>FU_JRkvrO36ZWJ8K6(% z!4;IrHrD!kX2UHrN{I&R;H&T~3Q9-HX7s+PI1mJjc4fRe=H)JH>hgq>FLO*w1|{M4 z;Bsn9SI6Y=rtU(wAzKueQQfvCv-ZeGpMerbl;YT;59jq=H$aeUlN6VGD~=xJO|2Ehj{Wr9FUx*A`GH;{+@8p5E-!lS1c=AfB&8_oiK+5EpAJR>`9fnx5WnyKvg;_$ zCanuOl(zI*&fJzx!qy`7Wqq6RxCL*B6*Rh> zb~dOMCUMhX*`^Cl)-P2nOS5`Jkx=%owQIa9Q8w%2uS)XKbw|JV81?3|javSUkx1qm9vZL{6h!8_1jwa%#CV#^Go(A!!#{-|7W1FQw`cVh&O zG=O!GV$a-8P;8Wd9I%n&((L(V_O3jq=`K?Z_TW)c*9`G6MczGAjgEc`VOiJ5+?!IZ zBfKXnp(fl~AniKTv$|MdO39qN-t0~u z2lz0D(}~)bME!gRaB-9At*dv4hzje- zpp3#vc;a{IuzV08;ppN=^M$sXrr{W1>vcUBJero|%ZozMd|dZ5`vD#W1d>FkZ(D^k z#mUUxi`vo~H{soAQ#5cnVNtSOl?DH?2BhoVL?YpYycQ#}=S$oViBR$2I%-i^^I)OA zAd-P+hcMBCgKBRO<4Mfk!$fqxY1YuH-|~v*wSJGt4NlR{+6&KB^iKf{+859)5fj-g zbC5s?p))(n9GBfWhK#CRCc`C;87ftq^|tnX4V@o^(^kutZKFl2g74PpD;r0o z)~EVP_HW%AvF5ohMA}wLS?#UnzsA$6-{QfPu!pW0?0{98L2fZ~q%F76`-L^;Xeq4h zuIaGv7`9+@EXw#+>nrR@#d*K1h5d#sQ&sLHE9&)&n9@>yw=cVLGbPl|Q4&5)F+~xu zKN!z*-P|9s`$g9IEX#q^ocQn_q*YC&(tA%SW@~ez2D&C`_MNp6+G$rC*b~=& zp_b--BuYHfK@1Wb3yfv+nLntd3P`E}-$m$D^;areSd>|Zs|#G*E165+FFdV z&o{~e;jH2@A-?gH9ix2jT91O<)ZU!ZwlAYftW!>n-?RkQ!DeF6ZvWudiy1^WrByDG@pMNf9tgyYlku+mQDx1@_(9-1kA6KwjWVOP1&w5 z@RaZPaaSI-2%c2hvzUuYR7)0S-Q$>B^f}PplNmt`viF8~Hs_vK2vfAs%~IETipeF? zBeu=JAI?0px+9KbuB-{iBTeD^8oEc#>(hO!k^E(6CCSc$lY$x5=l9IXokr_qMJcuV z!qH_MryWLemiqkF)|}!GHRWdO2_>fbrebq29Bl9()g;yOcLjXQmRjB$Ic;bBqR!>h zPeJajb~>lD_3gT@a;Xl2bwrq`G!~O}`Ug)F6aeZH;4Gpvk1*+*UECMe@9zr~a8<(| z`TjhoX#u^Lx6Fwidv7&Bt(9Ze{`L3$G%}%k^Q_J-zPF@z|4PYT|%T$vCWNlfBRD7cx$mZGV&w>}ay%tplGDYGT^zYeAy@Ezb7SC54frkzJhrot0*K zi<*YgFyXpY?+5$c#p7DlM8^c0futU4QE^v)=x(qX$dI%du2;TQJ0`W5n{yIpJNB20 z-dJK7QMD^~AEt2j^WvC>f2><#pqF?XVIexn^o{MO?x`|P?+ z7$b>JYkWC1XO0vuJ5MtnC-|uyKM1y3yjv9`xW}jH${BJ=)z~jAE}}LRp{sn@K|`WB z>{A&p!}73vFvj#n`MY<~_SeAD35&IUhxy2`5G<Q)4X2|`GsBiQ6zSo zl&rhrG;zrY+^MM$F*78X4pv%MgI15d9Kyyktd5t_vHU0Y!zctMky0uob_)9KhFWt>~%20|Mh39b7o*%Ft?xEFmX)GAJkU0AI}JaLEf#Z@U6h5Xq_kg4rc7c zWaT3;zIiA~+TFT8{P=Jei=W}0x#(J^d^Rett%XV#_;2~9lrPLHd;qKM#8)fe>L23> zx1%Tz3v0$IH0iy!#8nYG-aF^*iu5h-qj@++mFiy#{!mN@s!bT#W`gS6(c|gmTzXa> zA9n2V)*7KWkJalbY|By1w^->(p%Lcz;jPH7<`|@Vn4WzvZzD^}RITc5WxOc}2ZXfZ zkTExt+I@o|;}O8%2J@V$4DV=W6Cm9XXbae|INfa7*4An;?bjHOfNWMYI0hGEDSISL=M1*2p|lAa2j#|pPH)Fvs-Vj zVxN;1^%)`f!W2UxAPVEs{~cE(et1e^}=%%9p`N<1<-l-|*1!9^UY9NtjKnCW1Bu#ur1FL`2#b z0%$(+xCmNvRTDC41af?~6+`-aB7s zB2<*LaD0jH7hp9hX%D4aEsR`gBYuAk6qSOdq%a_mPyfLiE-#5V#Dw|&wICQBD2nh3 z2QuG;eUOKJ==(j;s#kTgTbc@9z=+%^UUk%2X5H{*I z>L&4FW$^6upiGwerOFG=zvxk`?m+dep?y+uOXVwuesW9_aT=o00!KoR$%dM1$>Tn% z*C1*##Y)xC(1TOnuwel7P+wGx6i99rr-3Y(F_f~gMvhNxWRq60U`U&+cN{go~ zBQrJH$HuKCy%*0u&vIsN9A;4)_iUm%j_h;Ie;>$Pn~vMGP#~hZ0u!5hME|tdRd;Bl zJ2*BlmKx)7bJTie3(`b}gqbdxL|Fi5t%gg@@apezi!(NM&yT0S-mk8-v7?OaiWT>W zT(#FZ!5}+!8>|~$CDBqP*stbxpM&Ir4Be}HY-M7vnT8D-Din|*0KHq^UWuCP{W?*B zq7xBUo1LXEX5-iAn$?YElN$X+xkIE+u4dO{8i< zXCX<&bvqOW)Kz|(wnD}jhY3B0&&cEW%1rn-ef1q2nydj^Mn)s9R+LQa`08S~nF%mg z>@-+GU}VmD?}h^(AO1kO)m_pk*4r5m5wUia6vJTF56P!4s|X zppGvZ2ZA@)t%?>#E&~vY^7)sAOjuThWsdXNl4N98yhwj?aCfDGTqlhTUOn3r`ILw{ z*=V>c`62w_Bd<6%OpN-62(L(7(Dy_czKYjqY0c}{pj6XuvFND{SHV>g;o)WI^^nt1 zr1GxZB_N21?!<53dvi-!FFqu_tWo1aqJZP0(jX~Xi!@!bJBnM z)6C(n3#CH{C%ULsG=VDPv-?wY81CAtM%ho@FVtwG$@Get1YWyxfJw#1d_DW_9h?as z&u>LcMSUOdzZu8TQsw>-a(1DH>RXX1Stf2hPHQhn(pf}@k+7L#D 
z=r1Bbf0*~uMPVb?`T{1LxA(tc$Bx4zsjhI~+&2k~Ggd*PdhdQg)nr7b4cfkAT!~zSg%F`;7^nP}%4`@iy$>0YV(g#g zdSOV8c;ZAyLNoXc)BY8s_$s5Gp?%J^o>7dvQh)}(a zQ^G|+@%^7`rec$YEr$FcSu}TJ6;M3x7tBS_U_VR(F|JtRiNL``W=egtK+9tyEuzCg z+aSHf)b?j{|6S&{^-Q)%vuVf?z;7xrBXfU4$yKARLV)%Fx&GXj%_>)7F8S!gexqGM zuDFm1n85uv86trUw{U*brT}Pj<#viaE2@f?& zXUNn0&d+qXV&*q>9X(b1n1XBd-kWy|_K`9UK3++coi}-~l3$Nc##1~B z09G_a-gz|pHR?yo@P#}5CU0>rH5Zj8>Ks+1zC*+`ybGR_FL2f$v9&mfF*N^rDH~r@ zBU&;wt75xpwEsUL;R=m7Ki#gm=K{Y{i9w)uDhZ{!W|{EyCMT%b?|gL}cuJ@)li-v?cR)21uRg#m^Z<|t_9ZdSLWG=4>ZPA)HWkOJ5gi3Yn+ zx{y;{p%fB&m)_en{n2i=c%THk)PtYrm0p+alysE-OgN?QJJ}ESzq^l;4OF}9UbRJL zG(aQWh|9w~piD4Tdr#Lg7rBhC08*i2bIuJodB(AzT(9v2+DN3UpgT4tsidyoYol!-2&B_Od_I>T=-d~EI z74_lch~VIph=avy?9_V;H}w0foR~{F%#;tUm{_!+hHlx=s($`%6*pjGn=U6Wj+BxI@B2g>i-@f_5a#5evB>Rwv!ALT)Bpm}h(N3qv1~30~m0M;3jDorl^EvhL`4J z9Y~Gz4Pt!|>_kD!K^Jt+UC+gC7FS1J z4R3d2B78Bl%e~wXc!j}@w1F&{vg__NO1b{pu`BAOxY!hDJZRclL)>1F4pW5b2hBT* z%>IwhbZlmEQ_&?IF;}z(QBff9%^PW^K-N}*2+|+KZ!R$P(^VyCgiN2Q=twaunchob zw_`F;=)^c=@nphMKvihR;x~dUgtQ-TW1VO^F?OIQ8^RBnjr;)S4{1MsAV=HS82s2U znCd?;h!824z%n3_P4<6(w)!9MABOjv`RS@~|9O8~3U7w~Q?lNoZ)uVl#&3<~AziMP z3tkrs)ux^z1+}c74iOw#dHj0VT8|Vc4XI58GsJ+zBZfM*Joqo5${>(XL~D@y|M-l1 z>G#_IefJ=DBZOB*D8k<0yuGdh-dOqms0GBwWiP@cW$>t=aNKRr z+8BtSSYGk+#3dtnto^qBHpBtw={XZdzF@cp8xI5DOwxHysKa9Y#QWXzV3r@fMj*HvlD@>f&xo$?W`1Gihl2c^f!8QH&;H8wzRd*B}jPp zti+m^K?=mwy8vIf;rR3liq8{#sw*MvR`v%HoHG88NHibBX_SVjSFv~-PyLl$?BiDU zq1%$AWc>kL)<6%Ix{p@d2;Sp|_fH^X!K(%#ERg;Orw;f{Nl}kn&9Q>szhiJsxe=XreDJLqK zuQ#;TA0xD`qUfq}2aT-ATnkG|^6w+TCQJzP!De&VL3>Og^rpUuNm}Wnw6hLhU5ZYN zwA}1Wh$sYWFo`$9Aj?xhleqOgq0onC);Bxp{ry95><^lK86!NY8L1O~hNp(JXrh;< z9^2xf9k4UUZ7e9W;6s4=0~o38arY9*mtEE2^!9fuK!6^fvqfUOS5#2G?5A~%4VmEPYG%2g06T`ElYoW z542-5?4iM09l9jaJv}yW)N6uP!b+q~#2d7B>UH0h0xSDw5Ip0pNS!6wa4BB*n+cg_ z?6ul@F$t576jnORscu)Y*1OBSn|<2PK0c?cXM2FwO_ELnTQFU*X;AxD7I~S{8@ixtjxRS(L))gz+t;)Lb3oKe`zGzj)h4LiJA@yO$rdXN26i@ty90! zs%!PIsUbu?o4}3nqBol*1LBa{6K98XlL>lR-k$xY0U){;;KosK-oYJeKhtC48QXe@8gp~DiO46Gx~=43;x(v0509m zpQ1fUxN-}_T@=ykrU23X=<(Zxc>l=-h?E5ezAk+JrqhE0V(uuA%sl4sfz^pSJLQ@6 za|#OKVBKTew|$WJnj+Pl2ruI6?ThelkrDJ2{eB^+W#GzprCf>c?k~P*a`=Z|-j`1? z6}vX0mr=NN+_h=>qm}pdTXGY{35E#unCjrFhsabd4;)Y_zZ1SD=MNWrD+a2p?(+AY zNG|15H-s~F9O_slo)*kW%(-z#);wCAx%1QFHD>jMmVo**WIoE{?Y=MZ{9*cq)in7c z`d>%5F#r^TLnGv5hHpa9)%0}TuLRO{DAMuW2;ILClD*Z17~jfxz83(FDR|P>+_!af zh`F|&Cz3i}23s)ufq&M=z7GgUGN|jPtQ5Bqks2BspzJMZ>G-bK(?&g5y+@e#uz zAolWkl_E2{wBx#j{^X8pFO`|9u| zvu7P;2dZBp#si(aK9+^5|Hu~3Fb;`JXpOQEsLjC?PEJd6&3f!9-h`8+Od%n@B;~47 z5ih>_6|!69L+@^9a;~mszUO#D&B`CAiC#O~Kick{KK^QH;Q^f>Pk>sYvrlxW6x?Un znvIhyFzkIPpl-tpRQmhWW37usObKJQF$riv5KK?<%tWhWNrf@;P z*ZL+eDZ2jm>FZcj@1rDOwVj#&w7yZHJfJu`8{Zc+>)aWOCe}htzs`W>ck6ckz6PF? 
zjjlgQVQg7`!>(P}-o?5F`$waNtN9YjHV=hoHH`|OMa z?Q~EQ>Y1jf+AC^f;z8v|vA6mvD*^1@uW~B9Xp<@>Mw^O>+H};OQ#Wn2Nn~svjPm5` z?KyEhvq(#Jq(&EVBMFn^4O2KYN&-bvp(^)- z1(yJJ8UE=@s2Iep{lrCGt}DI)`3$67o@F6d$mb4H>x zT|m{vKyM<#I{)#@x!fMhB&r>5%E2mSB~0)>5f82e=&S6oMK&sNApcfh#nj|mo=f?W zT<*q+V2xhm$-dE-!TY)$sJNA9Tv)twOhMHL^J)6Cp-+VD@#jyM&#wz>A0?UVuJ#;qS$v`@Ed%qPZ}#(W3k$bZDXA*X-BeI^x>)+IF=c>-N0SyxbBxz;Cn z`$F_k+L10&AJ#|i>fEo}a9i6n%AZ2tDR{=65A#j!^mbuY-PRq7=qeO8duPNBlK4kH z<2oj*;+I~K>&@MqlIGSYhLXE&_}uyggM(1p`%SFa6~m{S#gi0=8G3~%y@w`FfqwwFIa za}kkNMFqAj;`+Hc$j?d4v33h;b@Af`da7*nF*ZzF2?$XV^Bn>+utGBBK1Q;BWpY1Y`~Ww7$l8~ zq2r|CcBkTd+|+y)FAL9M`s_?pWFcb!dzyh>NNkALt(9_Bl2W8dP&MFopw<6rq1yG` z>y@#Eaa~ek?T4qjh&z<;*uBY(Kj&)yN;%}>`8m0r`BYix&G$)NOLZ38G~-9iq)M)} zksYLx3=GR%A6%NIwlfPjM-#}uD6_>e$MI+BWCb-uLw-SyWuXXR2pGL>{ZWE0Urebc(F8a*w77P8Nw* z1PufZY4xAwz7S&|O3V8qj4)pOwcvE~|1Y zb#vPWBLANOrk}-SwVbMdVt5MCQs=%2{ag#Xm%kHM3cYoNty&fdr(`z^t1Q{RHRlR+ z#49DyQo-x%w%n1Mh`X|;bM_@FhG!dvj?CrGF{J}8e^1Yot}wf?`!x7^KWlwB*WQIO zFpwhqH*_0T9nOdfoHz3iu6L)lup;IIo6kk;l&`ARi zO`vykyS`q%LG)sEwISt3KtAlr9f^R+6U~vvZ zt33(jcwBQ7CBr*)g{jRzZF?-XcHH%J9rj+13l~*)`I$cxhVd0uI_`NA}Zr?qoknsBjv2J&cL{rb3lJ7sCb*-D}~7e|*H5Dxv~GvNS8Zsrjg(a# zVKt{WqsC8fq5qJKZeBa0<)k>foUa@{k|K|aMJcPMK%GUTe#Pr)=WOqa5~U{<3i+bFCflDlJ28FOlG-a9B}UdEwm$<&}g7C zQNp4+_A92z!Vl#_Uo9ni5IDpB&C_Z6^W6pMSW{N2&`WJ9Xqj)VAgOS?NEcaLAr$J3 zD^a?pq*lhc`}e^P$d}#K@_IVehZ8?@p|O zt!F;uKEue0a(DPomFSHJq6aFz}j72-P=E3!TsEyGqpNTG25PWc6GgK zhh}>`_5l-tZjV*wtF_(Iki-ND0Rr9KslySs)8J1krLmdori`5aGF zV(rmvQsIV&3fE85+qq?wCXq}tyKsKt&FwSwWjp+F<0n-UpmYC z$OAKY+K!ZTT3uaKQ@3p4cCg0&58Z{L%#cW>$^}o1eG(Ad4K}{0)1PD2QvZ7dU z-EWayZ^b>{YFBn-yLWp~RjH#=g_HMeDr+#$8evkS{IKq+aAm*^JUS0{nrDFnQN1~3 zA>%K0MN>l1DyHGQ>2GZsYBBkHrua!eE<4CP+57$q-S%}saY-z2Gt5Z^#1!m3?>eC! z7)~N@r^|RZTEdhI^{9F&E3f`ntY`=83}fkLkhQ))rO$WR<>+HR4S>=b@4V{)QdPBO zne~v6FWI-0@8Mu;>L%r-8T8ioR}oql#YH}Qb!d3NX$8t+=om7!fnIF=i11O6zw1fw zw*3n3#yvFA6)eQlbVA7&!<;qsrhT(nW;oJ&e{n|a^cpl?)Frs2Q@9p(gXFg zg5f(%l=p%AsG*q7%938kl$;cifPtGd>a%?2wvrgf@8vRX8x;T4?LbakFqr+|#Q51N zXV7PtrVu!NQb=O((G2#;je!6U^!#L~-Y{WxRXaetpJ?m&x#dqW;n`$Tmh_6kgkRU^ zdw%phd};8zMvYx3kv|@5RnTb$S*H?vQyrX;s0gJuZL8W&&^OD|UI43M(a4OB3?uYO zcP<%7=SNbHG9?V17@?q*=aipx0J~{dQse^z^NXZP%&DpAzLE?N!Tdx&E0Q@`jDg=J zN~tPhRn)I;hQ@?7??Epx!KnsxAJHQ2yYJsC$)ggLL?vi?Hb;r@;{Pdywx9sK0%jqN zeI)`cv4&~(uS2T7;?yfFZ3P>dhT2U2dRCGJVC6}H8EJ6PAj*z(zp%;0el46Hp46*= zY1%uaEcee6MdW!)Jf^L;A#G78fZaI(sul@Q(1Ss`{dy{&$ZCp)*PMrcde8Cje9F_~ zB5r`bHkitdy|}_PD)o4EDHa-9-LX ztG=3XDLW@px6@gJo<+!K*aQHK< zxovF28`o@ZtgInDt%W#@3L3^-sPHN(vK^^vo0k14^tDm3-qU;UL&79PK4gUfmkwV?`MS7fG(QYQRDmuT=F_Srea{Q26IQcr>}<3K z|CH1bNy5cKS$t_iOV#~LN(89UJ@`OLjj^I_brW=!6rGQrU$d`x9nC<{b!hFE0VfOQ zhEvTWP>o-(g>)hHR|^`UwG^1P!?C+R>jn0k$ALZGCXMYAofTmW#geF0YjI1UUIuAY z^1RCSW?*rg6ay3G8+PxW@3nA_L9vl4v8I-}(GI`-oSU3%fH#g>+Wbh{!7Z}m9J)Dl zq^2h)wpU(q1S@;4!x81(-!vceqFoWRy^TKW6nv}Js(ew{8Dp{ha3$wwyD72@$jtkP zO$9A29Z->#=B`(-0lCaRFORZpR{B$UmnJp8E%Hu>Z*;IlsQHdvH0Zn4?BcLiRJ z=ko2(mxAT{KF@qmCqvlgcNyT{ZkI?BmuVfocR<|3xBA00a01BO26r2Dt!(4id07Zy zg?fWt=`^O;u3!D>e+Q(b8f#bn(mN$K1dBj->eLlXZo$-k_%OW}5 z!J^{>?E881zrH6p3_FNOGsu4c^uUW6A(2)~mqcr}ap{rHv z7u(&YX+dfTC7#jHMmXv6v)DF&_9`4K(m_X8@oG#nm_`l|`?75g@*Fb2>MGy$5k*!Im=97f=SfCwv7hzfflUEJn_ldL8qbS_+R`^Yn#mjbV+5yFG|q=l?G;Yct@}l1BErL%u%s0EUb=zF z|7pA|(rdjv&FGtkjKG9Dravs{(V9pOXnxA4-$J=7ZH zcB_0(PmE1Zd8Os~>O+wS20}YdC8p|a4bxpIc`3NM82mkhL+x)%4L8OLgS*u=MMYY_ zRS{R7(;&sny87a`s6b*Quf6ByA0xTM;GC|qf=;JE1709 zN;(~g)K?*UbvA37n`e4KxO}^71&{`Goet zuPq=i*!D+?^?yE&0-o3rJ`!^Exo^lX_;&P#O-b?lJm)isA|4BUL!C{c1m=!*w^K2s zdK|=Ro$0>5vhOiHShRF~uJVX!QtQniXQKAzRSWr_HGUWCo8G@reme|0eL&Xf6<(Yo 
zi#GJBEiyN|MG*M@J($c=N@t+~@y@>~M6QdWp?U2yVS!o=ra8lO_aaSJoD-D;( zQ2>-~gGkb|UO<6UmCAaA7{eEaMyoVnx#W}(RI3Xlu@ZLC1Ms@39I#5P%RnmdHkI|O zcR`iJZ~5aC1XArA*e?=l*GpemUV?i>2`8>M;q-282FwrUEnPPqo-Fgv0SEstw+s?j zEH@CsIR%XQf3h7!#d3CHOSOkSATf}o0cVP*nU`T9?=;Nn|h~og-%#Zk>S9CFc zPia1%$-k98zO~y5FXntt23)u_GZX6SA{(T=Iv!5Vu%_vyKm0G;1ApNX#P01!Mg0k; z;Mv({Amq4-^fW(`l~dMZ$4hHYbzd(R)VRiY2l3@24HZT3jlhS{U`AI3`GC58I1%_e zSrZ>AHKf5nhMc$mvaF|I5G!-Sw#DgXs6`ntatTh$P+V!K!n5xsBqrQF&y++5HR?Bx z6c(AujxOjjkwqa3t=)PD8B@qr{Ikb2Z3V3?OjKmoIF2I>qdB+vdR|$VF9Zn#Tq+pp zyu%Hi8)b}x*NWSW6q)@>Oql>hXvcr){qHvmm|yvaW6r)MINw{=hh`g-Pu_R-5rEOO z@B^T2hVG6?*=76P+Hr+abIj)R8B{PqLR1iSPTN3*8*ay;`z*Y)&{y)jbP*7rDt9r~oBoXe5UhMgdJ-@00!(xx z@ZWOK{}opYk19Vc9`thhHtRT_)y6#(p{kZU(m&rEg@Vl~eHD{R`-fBR=GnGeYF1SQ zIarV?8+(kE=Og2iEq53)@+w0?sCa?jyGz>N0}>y$IL9a_HzPec$cRhiL!k%AHqUeY zR-eLwB5TCkZhpWZsaAeM9l|o02zYODb`qrj(4g+I%ii3pFK0V0SG2o&`;>h)A^`T= zlt^q}@&fnSZfmH1zu@4MhlsThgcv|| zWLN^F;+{&eC059j_Y?`M%$}D8(NbZ{~|P1-9-WA4JeFhn-*y_^?z~! zY{{A^9XO6D95Gtw-s?q9vmO63R2JI5Y&#PuasF+srKOdcnepnR+knY9y859{rmwds z%nw#N6SXIC;8W)(?P}en>-n_;r86($AjD8z6M1W-)VPH_wCuavwxyyo81n%DUh_TCrvh4cWC&oZ!X<@GVbVFGpLp ze_C-n@GTS2Kted`=GeWRXY8a=ZjXUj=K)Z}lrCwD+WFHW+PTMS8N-q$93+=F`olB( z%fTB7)=n-o_~E;mHr-GDWw6PL`LYrGJxXNjjp;qMrHT4MllChmrW(x|5s!>Vhwj@s zF#;?K@W$z9Z+R{)oW;TN9RjSX`-fxHysKY_jm$pMBY|1Fq*KNhX4x|L7S+L$8YI@m zfWHD~@=*U2ME_qsN|5hO&V3}+^+Bga=o!YKPxZHCLI|zpp(`x>08E>nGclv0Dlp`# zZ0dR5kG~`UQY@|@06t3A;z5V_AoLmm|8U@!6cd5I9dTP-+*C6Wc;5?Nz#x3+lty{nc>-$=7-GhbyBomD@i>0Vx_w!gIC0?A~dcShu2 zLHP5r5T-x>J$KLbrp5kIaR8IwZb|<+o0G%+gMA{S-gjwc!ox%0gb%6~`hur5@B$o> zlphRWj`+VW;VyLip{~dpFA6ro56ftK0l+KoFwI5#=hQCC{jTwCEBo~P!JoC(*+FA6 zpPtWDG&2#AVOo4xD6^L#8*VfrGQoe^HcSSq>L<8Y0Wp=LEi`lvCy(a8?rFoXBc3y7wr-?ZoL0~~efc~Vc z6m1NdW3n>ScT%@=Y7>Hvbf31>+SNz3CFIr8FBL%`{`2@8gH>tnY<20pia(v&9vtlP zvG4@7b)4&M9GvVt$Etb)_`(fu<!Fz0^->p$NN;C)fwK=&VW3UrGI|A~>2=)ewMTxer5 z;WcP2NqvLOd-CezMq9E!Tl8BM?%@}kD6!A~nm7W|qY7Yp^BSn5t5`BgdTJ7C&^#=CEL73^F=IDIN8Vgo_QsDX$B z?JU661;w_UZSyV--$IT7twFNf#wzdO_c%10*P)u+e=H)P*ed5C+a2$f`aQq`95&B} zjRz_z+#KoZv7+mck&4&|kLG!L5_BYN{aH>}du^Gu=UFLnRDat{n470CwLenSb8KBB zdcAQ2$^I?G7!pFd6&jGtoC*Z+gBE@9V{Al{i>yhRpk(?zoW>{8j;KBf#}-$<-WzxU1hqd!LOFkb9N}udymz+4 zq)NIRbKc~SLBdIqBpjH41jRh&&mRj=)Q3&cbLPH7?kQkbx_^hWem}``-M{@r1nw|R z04$g9JmynrIc_E+w5Sf&X+AljAXs;=mzB21II#&G?Z(~;yrZhzPhXj&|7pa>p zNA!i~BZS9gO+*C~vf*)+|6~KeCeb9Z6QpL6No~HVI#`&cS1vT7 zFjFhVxn_eG+z2TU!_(9dIeDPl^lHM&FmyauxXN8L^lpED9z?}y@FSH%p?x3HZH!$8 z@m<^b7_6w@8e>`GXquc$(PCnN@;soj=h~lbrE&;hhj{wb45LL#VR1}TtSrRvZlHTX zYKFXgZz}>peV5vQtO5LV4h~R))Kt)V8AKX{mTPUvY?Txrut%=`=B^6!)ld{u)$(v- z!*ilPR9g@{Ayxs%3&3BP42YtjHd!v*j8Z_DF9-1@oVr?)p;II!c>=uXx|8JuvC0*e zVOwzI{+GJ-^REB-MXj|bcwE2tb2<`6!1Cgq3pM#f#uwR7S)K$G75xbPIYMKSg|H%jc=hqDjt!Cxt+U+C5 za=!YyQ#W1k24=o|=|uvdfs(tiH;eAx#YIQ4LP8^iPnFr}CwaQqT^-5V0+9q<}={*9x zEA+H1^l!W!zw09>y|?|ZXZbNb6`h@vucOQ-H(gkj zN(lVMOrTY-b8Em7$KH`@Z3< zS}{i&_B4tEukry0{czICg3&OZ)LyEx!Yl!851mU_Vb{*03m1*DUb&0KkGHOk3Z~pzvkd*97bR9F(yajI9mIDgt(}0@!V%VYyFO; zj=An3bvb}v-m|SIShud;FYdQPZwaG*m3Zz8{{*jm{@VZ{%3Q%Wc0Z8j=)%i@8MYr_ z$xBBiQi6rG58u+>fhzWQTB-ehykl_9q1a>)4};pC5(6k~5mv~czSQp3F|RF&x#~@Z z=PI}!TU(kt6=tlBMSo9#{d|gZ5gcEfU%B`AS4+!3O;EpeeE{*bb|=8PqRIdi`V*LxF2$aw%dMd@H+VS1~|!pl%%b3{)15Hb!mdL zFFs~OND{ckwSJexx^>IacI?-QpgvgW#lqqdu{6PcQXEO!v~-mRh-(yOha@rhOh5$b zOZr1sOJQF?(Vh9x`?`U-l^%bTI%Wyim8R8ymKl8`P8=jy$Wh1s$roj%%lWW($Djep zGxvz z610OrKH1BX74w>9yA=RuI3zZcfkF$uUvM7tri;jkW4|60 zWIQV`*{EUXp*004HCx?RuN3cYU8<(14ehFXL<*ar^&mqPlA&8#^34uyi5{)NBuLVo$uEeHu#cJ}|m z+c!jJq%WZg9xH(kPLLC8CAnov?Q`8`v2_vAbK#$k`VK%o9lCn(oW z7|}ogDn4>EyVd}p!VzL0|oefI;7(JX(A 
zC;y5iH;j`0rr$kWrtlHyinY(PFet11*~JtZ(1yTk$wnmt-4K-zU!QR|7A@c^pPWyz zw+;|6QO*mKMK&dzF7d)?0_)Iwn_LDuXSb3mZqiCGcOk&G=c8_5(F62n8bu;IMEAc~ z_T1#GK6&x$*ROwvu@Qd%=xAGKniQ-6hd~v!#KE|LqqNQd`D6u2WE0wy1|rJUNYU3} z-{E<}EWt0h)LLtkxrm+njpR3}U9nu5uLA-|S?d;pIEc|D??+qQ;xEV?d!eq0d-m)0 zC-NfvKXZOO<~mRFmP4m@&^%Q|M~C8{y!ojSPSgb zZoDQRDfuH_G44OLYP-4ASCEGK>~DbxTYUjCqKc6W^IiW;@|J(Y`rn(U>fP|_T)Xf( z9I13UZQo*e2%L-j*MQoY&R*||L+spv1+`<9=b&sTdJZbX+o#dmk0taiulzG0H1qF( zgBYV#g|0~~=s`mldQ6w0t?_hthZ(O@nvci`{6$cWYJ)K}04y(%$!pKF!o+6ef9+8Q ztd{9h?)-fLST!q`{UM>#I|34dY%W2N? z3M~_Wj8h5YW72hVyCP@H%jB+|d?3iOWPc%v9FGJ?AAH#NH(1t9^uW$v;Tw2_?Y}#v zDzxys+)`9X9caO%EJDgijkPsqA33pi6z|lY`RMH*UGHRB*JmsKFP?8ft-b-yyY|lnzZ9CrC#wy#wSBs7U^E`q{2liBIt4rb|J|CWdH@|Aa z#E%|8{<0jr-?Rb;PW0q(fvVG*a7Ya*4qqAbEc*Jj)$MRx_CHhVHxts7{#D>N6a=wV zKk1uus&yr-rb&r@?a{WomW-&mInjy+s3re`<9*_H4LH)j6~5bTA&q`=KLS10lVy>O zAD13zYuh(7$Ep91d+^wYaLCA_mN+-c7}NtP6R;eue)0ENGZ0}xGAuNI{Vt`c7%T|T z`{EGDe+2TN5oGrj_XVDRjdZ!7y8R8a65?d?%3SeC#o&_zU*j|j?rV-D=5*+z0H zgQ?f1QR+dLO?05XbE*eh=ODV#&r?YbH&3pbx3NP z@0+qYH?fnZ48JOAO>^U*7Lj`^B88PER^2(IIkGNddbD8w#6A-w6 z|Nej7r^j)*@DO^#1o=ra&Ag4_JzLOZUFc8kYrNg+z0EAZah~kL(FpA{?!7P5Y@eu= zZRh^P?YO%fjU&lF7up!L%p3dpHhfwB=X!rCw(U7FD3m+oqOzdC1Pk8_`&=;E+wFGY z$f+sgfA;U>a4&do!E@|~3`~``QJ@&!I?_z=_=S)D5D+U=a;x5#*e@^xk-#5yVKXov z?e;&JJ24qZNDsNic|1D4VQ%_oc`D3~b{08wiu^Jn#S;JTj~EA z$kO-wp6&cU+ZP>j?qA_NP!ZZ}IT70VA**wIIMv1A=_^i`gLBX95Bp=-uhog&2rpd7 z{7A0&g+1isjlIo?x{m`i<|`q-*B@}aH(pQ)*`dU|#vJ3v^kM&9^3m$Mis}`!js^jV z2QI1e)t!qD@oi>9nj}|zYhK>l7%qxeL9mAS)HKA}^OToO&P~ebJ*CK+yLq;-bJBhy zSsSdn(tJonP7yk0Aq|h6U-q$QXJbpt%=F(=zDbNHBO~MD>e?l%rBz#7YllQipnv7! zt9v&HP81T9Z_c$Q)FtqmBjUbU^>^zV8#8cKpWL}vW2=N3Myiusm(fCyrHs8Cv#77J zL2b%q&pQk7&E5>i8u5b*U|uF`>^Jak8@8|Z z9YnuCBGU}2y*Qo}hoxq)VmXgGl3lFv&$8oDCqxfs;?0}HL`4_)0)IIT5}UGF(u##c1d}c!_%3=*D42jQ21! z;aB=audtk+Nh*?%oSsehF2=t{$08z<45J%!TfWVwQl}#+d2zZaQtm%1perpRA`;JI za#c<2kg5HF*3-R}!375)IGCfwrUbRn&~$J&(>2?5v$*hteEhvSt(WsPu~#)G`VaoT z0jA8XEFwa}SG+PBrL%9JU}9qaXUYk5O=05)1qavY#Bu8VJ!$VR?VxY#o}B^~K0XUS zf2`oRNA%u*=Ja3honV-moUDQG$7+jK6H}nl(kff8I);#h#g2$G_{#`=h|1Un336TC z15vi2^G3Btv9`5Y^atKYmzRR~e!~P`_7SgyjfKWMENpCuB7gH#&$mx(E3c!Y?(ZQ) zJ+rZyOSHG=EM2PUS~ZP-MaXqpTGeOI zZax3s-#@zK#V)4_bPkDM3Z;{iD=zID*DmX9L*x0lt-zCP!$9E8- zfj|`LWISMGx9Vek-u-9EZEqp#nRW<8*}fiYJTm&PrM^fct9(^v($1fzCRH)0Lh@sv z&jh|)TqhTZUPmJ6lIm4&&5s#g@5vz_*<0DS}&out$ zmoSB-W&Ziczwglp@9)X`_pJZ@8}Ft54xj(r)Sq8pAo&0OvVV4s&$w^Hh~nP6aK7FL zu*W%(^hmv6CCadS8!bA@x2ea!y#jMOdgrUq=h@>MyY0x?by+@tHu1Aq>rvay%RZq= zH4WkK>I!NaMu}@u-_ia-I@%CbYO_Ns?>e(@FbSNvU@iRx|%))P0s|3P>(mU!x@YwQZY^8ate>Hlv> z3&}}8?_}dCRuTTTCW(xMtzAt|f9jXgdI|oB&g^EJ z7nU=ZK|7TSZXoL;=jnNK=DyE6Ccd zNwHOSd*L8&qi^wWdE*}KZH<>ywC)r@Z2sp40jfgW%}9{!EO@@FCB%$dn=ooq_q$u! za!jF*(<7X*LeQv0XRvGw`>KIG<7Va6{{^{cevo44Q*r~z*2b!mL$huFx?rT?=e*ST z1Yh~)`hrB_QqJ9ScjCX_dgccxAZSI`hU=Idn*MPCS60O11T$jEFEmNm$3Uisul?Q6 z^!yx>^_Pam<$H7U58_Dg$Jpb@Xvi-JlVs!y!@YLCYxu3W7Ii89Nzx&YQ00Y;T&l4P z^q2i)(|VTg(g8rR;IGv_;~^xcm3pMhQ2hXe*o5JJ&DC03t$%v~lHI9QdN#0w4Hq0s zrk2*4{UJa?(r+9nBxa?tVtGIK^Vf4YPyMI37iMLR|AGD~61ffYc7jE!$zG;&%#-x^ z;9s9Dc7YzE;?HO2k@9Batv14ElMpIdSYH&E>*3pqr1_9mj3uZ>C&Yq3l^sJicmJ(7 zrTPMo3sa722c3eW4!ulvQaTcN^XY@D zF{y2ttn=jjvkexB0c!WSjW+eJg{s`k7Tl0CM(MD%-&Ji+ApWiWHjS5j6Fc~Z5YM&h z*O)*On=BG#pK8!%y0U!w%#oRyhF4h=WMILR)|GNCco7KPwn^*fwi&vP_vsrrur3Ez z0||#Kxx`+m2d!T(L6d2Ru1B zQs&%KYl~73jJa-(@CLS5V)STyZVUf@ez3~-`vLb+md?n@fz=7oZ57<_+dpxWMZR3B z7!B~vSt^BdL(0^x1=yRMY;Ri4U%e^5fs?=X4)Y8nw?XnK>nTjy&)vu>zjW4zceOjOJ--@Rbtv66a14YN~X+ctYpbz9nbwXG-A zc~f&QPrr5Rkn1n3b!)EL(Uv&zH+4_?w+@x?38{O7&yOr<&gfz5{=LWnh3PYwQk!fh*dTryIKT)3G&62L^e1;ifk5Wk5=NQB&?s}vd47JC? 
zj@yYO1BJjLUgOx&Uy$Arxwr1N1jyLhPaN|-V^{p5bNegI|M!!N@R$ur^t$)kA3@~F zPw564VSF!cT8YDCBLEP8{CsqO?Nr#Q0{7KK$p@p7$Avnd;o2e|7cJCK!`$Bldek)) zq*Rzwt9LN@XPK4XiIT}^{aNyEfZy}>=|+SuRn;peqZmQJhC0R=52qCXY{C(J~5!}S&i8@W7Ob5CZ3J1>|WJp zgkO_0GwUr{*w=7Rx~#)*IVYC;&s8my$yNl|ee55LoxApVZCxG32%m|x=tyMtIExJ2 z#1A&Ct+cl=DH(>*59B@MQvo`8qL`Qk=mr1B#pM|46j}GgtgfMvdvG(onf_>ONw!Zu z9@mw0Fup^RZV7LL+i*$>H~3|PGDbZv9L_^8F$}RmqKEIyg0BKincALRfomUlE)ZgR zk$t<)Og&bfC_tD}(W9@hc1_H~Sg1@6?`2ln$6J6(FpW#Mls*-3ny(KPG>dx$RTw-* zC!dpdTM@J~qtzqQGLsb4II?6OKDA%sAAc&im0iRd`E*@OP5~y##Y_ys`s*i-_W4ze z>=%3WMo>^0I@EA6kfx}rndn__+x-R*Ut~2KH()JbCUQ=Hl}vlcbamuMZ5i(;PWJkm zg{6Qv1A_2xS}|lZUltkyzYOLaX~bfA~F@Ub%WXl3y1w!wRN5Mej9s%9d$ z03C2>hWrwcR>_1$|EvZ|MgKnA9qBBwlZ&vR0q_H@MV&tHqnm-nc^}eQBfnvfPmINh zOK50ueoarFMwFJjZ^i9vPtsBPkDAG7kZ{KQO80n7xlDNS;SDcKVWP^3hE^;on3qGn zVY`z~1w=%-trPiVglP6tCR|e)xyuiKwrZ z&uwOw1?OHTeWMF}z0j_lC>mF)f2yHH(@@rQXfuYIp%a$29Agvn-lV7Xv}M-!dUf(x zAk_c45}6@)`IaW1N=9Mv8+%~hhBYJZA6Z3wrTQ;0#=DG1UzTY!`u6B&+fb9Bt%!o- zqRw9aX~0iEeO=yD9ulnvqg9py`#;NEQ?}14xCnB+ z(K5Fq6M%w@wog(OKZgo0U_drAt11dAr9HJECZUjnWU(42Sx6{7MM-JJ^Ax?@kj=kBk8ZEr^LmgWvyj8H;zMQQ76UKH3_`N#(;` ze@T!?wS@TU%AA-rxj5bNHG7=7$R2;_z+al%a!>bG=+;2x5xa6*I7zCQs&fAr!qi$% zO2X+Tp5Bpeqh5#SOXqvsc&{dTMh;gzfPO4e#nBU-@Zga1`WG0a`nEmGEMmo1>G|@w z6_$1^?8tw6ysKW87Do=~#*E_hY4;UsE2+w^d-n z6OoR`jxpw^4gVlq*ttr%oHJN7a|y&EJq(=eEDvzu$ z*4-SJn4hV^yzKoVQC82DXZUR^i+#(BH?mo;McjzI(lh|D*4tRP{XW(+L1kcK^E~ZS z6)CXFA=n`hP48AtpRnDvzZm){_D9eoH(V+u^z6AGbrRl9aKHQg`R#1py@^hBy&C83 zcMV4MF^iY|WG2-$^;=)9_xv2&zXWL-LNr)W+4zT6bg&v_P)-iQDkpdHXa4%#VrT4F z8P~4@fKdmexo337@=lX1~2*G)-DEL z-@~5eEgpwOQNKNotcS&%diOjXuZRZfDo9qRL3S$kEP0){Wqf3iAURejwW4>-pQEbQ z(UNaE@Ilyae@g;2%=YVUBYKE!Lu&R?x1oA*P^DEq&%H@V8uzmx&60BV%H5T7vG5*> z3X8nYwQW5~u$vae&F*vsSN?S+urKRspXVN_QxdMA8drX~tAJYu_H$shNmYqz@PwsM z<*SyPBdvR1)mTLwF9!mS%L5@1lSm6IpQMGdr?uHvICymoLv8#obY=*5H3l8U+=IHlwc=8?cpWf!Vn%fAVX%wGSvY`485Yo|~c|G4??SKpHijG)D?REd5SMow35H{U3w zaKPiJYod1nW9*f9fjGMyVsthB9#Zbxb2OzJPz5%SXGZ)gsu1v7se5)BE;kohZ-aU< zHKB@U^H{ZO(F9^~0~af!>9xiVl_()ZXR>8t3W}xZ=SX1$uUxm~$NYPjzMuLoB#Xp* zWoTkXL|~8%v9}2~S2>QqiT)M|J`A7qDGH%U(iJBvpH!_UPgr)gqg3b;%{zh z?i-fqD2$hebqTi5?0>O6ErFV&Zss_(v4!g|^OR_eF6SLbYqjGT2uD@AF{N>heyE&? 
z*AA(e%~hvp2$W%uwm(RTDAm7&22W}Lh_B5f#iSK&AvsIrp3};ZXDmC~>XLG8-nA8u zCRtkM$lW+N)oH!id0=ZptIzuKQ)aLh-5IvM74IP=C-hrK1Eeg^*p9;A1(khE$g~x2 z%hxghFT9dFAa3F8GJct3`?3R#%N2+$ySblU55Mm-)O4jBoNc=%i$?ip4I&WC91=CsQM`TqJMkjQX0LYt* z*!b#tFW&AIIoV7E+KNx*B$@MWR%Lfm;2`*V`-={GtBY(`1>+wl+7+9(8JFYJws{tK zL{yY;k=@s5M~b;kaY7+jYg5uS)-{?mus151U%6bM>%KotuBSU#W=sf3{j+UEZhp*{ zr@XXF-)2s~`|MO??ycFB`(9m*u|VNkwBV!j!6$$aU1Mpt*$4MVFWMw*qe<`w6^T6~ z^vy~{Axqs{kbBZM@mPACCzd8<{vae7cKFa1L-FkdwYj&mOySD(t~*R8k{KXTk1GK$ z*J?OKkn4_a>}+PJ=$x^UR&OETM5KD-XPIrmJ2WdgC_kG^a(2g>+n5m{^Wu(6oi^}U zM9(Ro-O||{3q?54@x6KZg;kYyq?x6oH65{k`F+>iz`Hi<^EKi|Rreq(bCC!TJXAnV z5PRp&RTj7pwW{U%!kP+1j~{3;KR*oGe=5i~T&?Z51W1B9Ean@w0m}`J#8=Z<#tul6 zX|nbEUIJufg3jeTw36;weP#l;y&~U?Snt5MPMroAe~TYDT9pHFMA~Lf494-Q?Aw*V zlWB?_N};RpCMOPq`XcAljI8hRdw8}_TKY1vl@K?6WQja+nwDnSDXfE6nM|JJ^KeT#U|n6|Djs4#eDn%Gog?t$S3r%6Lw#zQRw z6s1ld?(&kB<+nXIq$2P%8ZIuFH;572lS*wtd99LSf@ku ztU~m}uldR%kE3mYu{F`b(j;WAi?sFGl64OKzV~P2Ixb4CdF+WAm*4DZv?(bx>JrY= z*`VCjdWuj2`1!0PZq~namQm&btmDvrGmNM?;n2JIl6&9{6zx=l7L zfErqMK)^EzQeluetGVfkk&^hIOK2>BxsA5Ui?8p2Mxt{FBu8D*?+5&0`8KU?v)E9y zd4*(Ia(KAi?0xG0)x2aFVb4c{A){6J__pc#_^+~3R~JCm#H=W1zLctr{5nXsQT^jm z?pbfmfCpH7{Pey!-ESoClzQm<8v#hU7{Enpd*qNCJJ>h!&Ar>1v)t(}G8nUpB>3Ok zIPT^Pr)d$ZvniP*=qkft#aexO@&!w+`d&J*n})WQ>bM3mcX|0H$PC+cf^^0u!;2&C z+`f(DFXIeu02q9+>Ww2w5&I>lHrAH09Y+k8SdAr{nRq?mO`c&>N~VpkhQ_^}@D{`u zBepTGS7_yB0q5#1td1{+Yn3GLgnXdVsOcY(kF3ey{>E2mdwO4WWxTQKn?c5{Th1c9 zbgCLMiI;3V%>WDFiGj$)Mt%j5aqz&i%Xq3qUS* zs7iEffX6yxwrcJ^l>J*fw$Q6<7)PZn_T?9gt6?fndoj4|wqWlC>wQA8ec8lR<&1q37U?3aRC7^nVC;+AE;yL z7?Cs4Om*D_aq zFOoh3B+Ax7;sg#)ch1C7RoxD+4`Mqaq^RKGcP>eQX0K5!D9q6HU$KI1wJlr(GFq0= z(Pbb{1+R8N#!!3qD`dfSP>Xu-TU6|NwO`pc{{dQ03Wy*mY=K=Fu9p{pHqF~mHfXJX zA|@z)Mwz1;ScgCE@iWoCzzFpyXmKnsjo5~nn<2o&T>vGzs;Npd(%fIrF}asLM!%az zHc)a#1z9Cj&5+f^rjl;_JvZ<*1er8VRMhbi z1f3RdF_30%DAYA}={Mr%@|2hJ8n%)U9qFm6xDujS3MjUL0-q-idIdY7<|5QB%A0%m z0&n%k`GS|R8;+s+FTSZaS2G03%N{{N$A6vK`skS$DAUNG)jl)Xl%1BLLHO+P6qqp4>aTgl`i)eoo$2j1-9F1X(huWRoqrBT+BUGPCDiks(jb|JsMPDIka?=ZE!c6{g%Y&m5TGh%4cl;L?ETFBkLH9>5Kra!5%Jh3nr$nk{DCAzDm8+LpllGq02i7sPs6)3M1)L(zP0q9+9y85UCgk)AO5h-{a6273~ zD_<}CS~84Pbgqw-TEz1Ns*nj?Kq;V8Qa4Juf%e8E-<5EOpTg4zgB5F5PM=SR(7gw| z?DeQS!7yG!}0xi`leN}DWSNc^+B^-8iKriD5ec`zsBqhM{@GU5dnwgybbc^jh)Ba^yZNV(D zEr!YfeVL?#cb8nA_gz+237?@8hr-;5uebM$^J>3EVoLrcxjV}Sp?13)=_&;+Oz)6|KEN}3hA5zq4 zY$A3l?)bwXHqOSgG-B+-{!km#eEP>*s|`+Y9HTZVbbyrfp0a`Xbh2pq+TDVseahY7 zYKY|83pCcaEx@{+vFj%I(H(xYF5zuf0vh1wtvzHAIh$xXbU`anR&`x^5md zdSvL}n9VJG$zF-Loq;GC7Wu=1>CLRHtI7V3i@{0R_zl^SokQw@s7}>Uph6jZ`-nEj zUIcdVa!8=!>S}eLwMK<3h-oXx(kUm?1PtL$3JLh;#QkGXgs(sE!4Z zoRcoH^efbEj*QmVK%R-8`O5E@cMS=)7EEVGJip(9dY&A~n>t~4kzwj%E^As2H<`Kq ztF+59j_GMRVIe*}d=KJGP>&B%`JT0e4iuY;B|Lc>&3DItri-|%6OnqyId8tQ$u?Uw zsX@PmGrFdxBL9S+Vdy!<`$C9F>F??1#!@>d~v=^#D647T&wAb}V?)S)!7^T3LYHQiIqP@zd{} z$HD$0eC+{5(FM*VJ8ZPv1GQho>Y$I>Jxs>?+0$#Yp}xJ=>5L9nFA=Z1hja;c)EeJ0 z;`wH1^V|$wN?7KxfDE}`d31$m)NT5GxN2JetVzQ8!0nh+(Yhi30(;n7=}QhLwg$}Lh9D6D;31O(zR!}ZP+tXR#JEr@xzzZl6U$FPi`csb;lX6x6 z6`)c@Yj$A{yM1;n-AB#u-O-6b=L%U=%FA&7x@WN#_pX(oeprACCltD1dc5z))YGA zWj#!maa#5TYFWF213nQ%sJwDx!)SDRbV7V0r5)*5CQJT1F_2v)t)yt$C50WEMu$O< zoaXD-n&x22XQ!>Y5Hp+d{!w=kx2Zh6B}S!axH2b9#8zBIHWf-rYslKja93SV)5rj;Y_9gD(vaRvWR^ zv5Yq#s(T3pYMX`4XM=YJ+I+K{r}OKEkXC_|jBIa7LViLnc+W};?bEi(skPofI|QJA z_;Yz(LW9KV^MMb*BJ}v;4+~@fIWtwxd7@p9Yd%58CTfHc!@@GJu1E0Y=W)*WATdMl z1fnlhjIlsQ%Mph|k$sCc3jzRcL@CN?osj*d?5r5D7m!kGsfvBAB8YIW|!n33L)l zp%XzBsh$;G3y>;0`G!CQ_Nnc-uSH`)$%+NqaiQUwC{*S_AcBXdvw{pqOhD?NzVs6$ zK=+5C*FX*O&FRm`5o(7Hk%-3$bZ^3ZR_eZsoZcEjB`Sl23SBrmC~TsRI=2t522A7~ 
zPiUa)dadlbVR7=nHYI`1?vbFHg<416)E_L|F#^z2pUAZ}JP*h;J^7NDQ6bp%=t;Pm zA?9-jA<^kIm&?#YfI<3|jC%ju3*deS8qLtd3jhR&SRa*?e;}eGEb!ivGUd(HJAAVO zAlnYP939y2(s)k}tf@d9kF;n}1c|W8ro;%P)0OAcEto|}eO*pnngi5W>G?Gg5iVSq zBj}RjUCm(E9(5CEE@N5Xx8{Hv^y@qoxvbp-5&ow#pmy8QA6~x93yB$8d?p|=vxX?T zBSnva)fGbTR)Bv3^g0%#=n>Lm9G`mRyAQkVJ6IwXyrzAdwDPY6zFo1Pk)*TU%I0;_ z)OtaxUh33na4jMvJ3Rg44!KAOXz^x1tW0eTng%TKCns0s+!<|4(VZ^@SQcLJbn*#@ z&Z#yHYPLkC-4d$un6rWQMf=&FqY$UfI}X=Hug6JC$h|lj;msXf(?#~kZgsr~6o&Ya zALOQ%@nk7S1+jLs%8HPQ1wQVOs8wL z5z%EB=7Qqe^m<3Q<}LHK$QS3N$$H}R&&%}$-mWUm**i2nPF8W-F#wIqXy2JGx{wXm zyuaP+t=F`T>G&_16@zW&_Qv-3x``NJM5))cif+;g>)+4uc~_2L{l15N4jpe8{IYC* zXQ@~YQSOOLCGLr9?yH%_Rk6DM>pJkp`hbR#;*L-Mw2v4nIbYvizvV{SOx)xCAi`IIerTQFqUCfOUe*AuEN7-}cN_xfK zTz{h7IyLM~JIt`42BXf>=B8hBjy#=CqKBJ@+bZL(`PtF?rzL=$# zFTnp1pjH*VEu*336$0&!mVx58P~NDMWQiP5kDb+g)p!tlrmbQ&?2LW4Xxkl#xA{Q5 zCD@x61%=QO02u7itL!eT?QHD6Gr6!94vlx{KY>pO^a?_io&|voEH_*l@Z#73SA@O_ zT3|vWu9AexqRyS|5Q{A>ORhV2-o(d~gCDxNqoYX&?8UUSv?uZZD-*Gvfs^7o2z6JW zg&@jR@#floyHRtlcil4{w38*B$Q{|Xb1tc&MGf|2Azwm_(6U&_i(X;C{|bw-Fu>9V z6r3zEw1NegCYtWgR%$KaUiAX~oepA4U9-7KZ4@o_Q$sFY5Hdu7^#oKjVQ}Jr*$L&| z8#hCvk}az3%(Zisj9R%(F^ly+KF*ssX|KU~blK|TonU1$)Rj9#xt*YHy#)=>CEI?0Ag4&;EJ;_qJh znw22wugEm=oVCl@eFTfi4dLjc#23G|?OBC9V|o(Y!Odp(W%xOGfqsE7JSBYuVlQ1f zSj`{>PJMA6EdlaEbd1l!!ATehdJFi95Xs8p3gDDT?$wH&R@c^&wU?ry9rOCywYclo z;5<8(K@cZGZjOb8hg9$_9Lz&#YKh|Uh++UaZ-lw|L0|4Jo=Ei!X~tbXLD2gdU#w;l zT?$VVM8$_yM2ffTj)>S{u6Gmr=2EqfhxnjPiUxo0icWf}WX-KG0`wOpz z=Ni-qQ>{H*1cC1t+QEuYlc0SM6YoM4^3Q8$Lvp;c$-+OcZS9!ws?s$6yf&~P!4s`NUlw7k=3%*?3zGVda}SQ7LqPPJ>|C7#KKKtK6ar7ZAS%J z3tI1x+o3()p1oDe3r}xPbwYd&sg3>60fp?j;+AR`ckI(0d$&stOp&xu1iDuU=}-ekd$x+`0SLdy|hGLN7ad0@`R* z=O#29NRyJ=xz0~G;SlAZZeRD#OBZU>r<+@8Wy}-Jj5kWzIJ}lEA1CAfei+k<+P^U* z(|D6;E+~x=#f+IGO6@zooLRxwCcX4N%E9zb(qY8?59X1R@+4SNLlYWs?5U7u;x(zI zA5ktNw;jyxU>M0SiieS!UrI^v%_DzvjK|Et8QjiMBY>k&i9t+6C}ZA7ARItjdDZ9| zUK(nm(+=la!l?wC5XxW1SYCcTw4+DXJdIF;d`i>3YsvaOZv`W%lywJ3H>#;q=aGX* zK>P=sKd+^&83Gn)KfzGa^q%X`tMJDlhac;^ z>N+uUn^mg^BF_gHGs>o_vJp>f4=-OCw&)b)Z<`IJ(9ue)Fn z3qP%|8UKh!4OG1#WbvhG5uYE%f0vC)vaL#}Sg%TgXQVdkiPbs>R|ZjKi&;qr&hVtM z%h}=l;R>pwbm3@N{_b;Qg#GKSdSOa{u_cx2p5jO3Nq>$p-q0%NKMSu+1<@3gQ%esjm zr78K=w>KD!iMU-xKD&t8z}w4K*2K3b4c2Bmt7xF=xUa4AtGze0&_AGyF;|?Jt2#Dn zzV(M`v7v?4bFT@Y_AZ1S4scMa7QU)?{^UCL2BJa*^$;yKGeX~k~oC_o5j z@rCW^6guvem|wbN zg{(+iU}YMSuqgE<4(}$>ZgUNV;EU>C_|=MG@SdG)^cY94Y@%PSGuE<1R8c!?WQRa6 z!C|%1;c0E2L02r2*Vc%-#Evgp-$fMtpB(SAA+4}Pr|WXO)?1l4Ngix+yfb-=ICrgt zl!r-G-|D?EB)SPxt-x>1}VxL{jA5PaG2wR z0s{;R_q~)aH6}O2?u$!RJk%zpsyYgaOs%5RHle}RPn6Q_z`28x>uO)$?(Q23t07FS zkAiuS(Lgxwmw(|zbFK0EjE$~@HhMDq!2wi+Z@^$aC`p;iZh_<{ z+BoK6RguD9pyn!6uw>2bib79QuDE+>{?~7%7CFMo+HjSx!zXPX0^Yo-84$L=wP@2b zI)hj_OAjU9RqbBHxry?(6dVg|ww^nF!Ng=zbh#@bA7h|!$?C25!JSOk3MBvUNn}Z< zYLBwRTwZRYp>w@q;Z=JkfV_`yv##PmbPuddQY$<%<&!8@A9}3ICrq`#dPm^ZxfENB z*;J}Sd5babT+UrvBvYc<6#U&4%TDK-lHcmFai#o4S7k?M?4q-%f_B!ZSYW<#*&==U zi@k6A-LcnDUMkv+ZQo;&)T09w{k%?%yVPwTw+oDaA8I4{>TY(J4$LHY$n_sC&B_ga zUl5I`{~M)bQ@pQ>9;eg>bc{ozT;?R3lFx}KZgiV$=v#T>qy5?AR`)rHo);nB2o!wo#*r)cG|Mo33;x;1Qg5( zm$#dDh)ZfO<^0-k9p+wedpWv%)?S3KKh=3gQ%d>F((3sY2b@H+^bA;ceKo3ppoPm3 zOt-`mqWu}g5+v%H3`;huKYaLLD6gn!+#1j0@D|8$p`wCh2R zO|kq;G1xXW*lW2;LQ|HMvNBle3lM>*Kw!_n%t@^ zyM0o;IMU9c6(;W{S;g%=v<)uC$}7CTr_m?wER@++G<Y1~9P#uQG(LJuylSvz;PZn4iZoZPp zPN`e!7&C6lDM)adUI#qB)_z2}5@Au)q>`4>@T`u2gt~UYo{JvA7nY%+NJvR2BWH%X z0(g4Q?u~lLM?aDP6$+^#qJh9KTG~l7&mR%MPWP znM~9b(_(5T_FWN_1=+a1-OBO4K8J-mc{Qx}Fs_E(4gHXJ=D_R;XY1w%2)~ z9GovDtxV14Srn|G%9y!tICcL{g77@v0{{~8B2V<;=QfW8SJr>Ui?mZjb=5qh_^ldIkGXW4U z`4|UR43f=U)t6(MCO=jRS)Bw(6J~NKuri 
z4OXIAEj(xVsCwa2F~=!RWW^N~-Y(~Q1ddS@4{K(pV+yqUztiHsy}`aZa+|ZtWfloc zsi%F~CUcaHxZQk~rJ!r%QPYtBdbjz2rD>!vS(|CCx{kw3nSyf)wI}QsYrONhSkRkpI;u7T*xcWi-&+|c*Y34LN5R4vB(&fI{B^p@Fi1dJ+vA2K z{-{qJBOTq&sdey%Yf1C8Y<)7+f1Pdh;%i_i93C)<=RSS9D5v z&)!hk)iV=PKWkD?EWsc-|FG=9_v{z5^~n+=7@t}1eu;kGLPmsnK*4>)LA|N)HB_y+ z{(XXSvh%2DeYROwLdBqh6^a<(G50pfs|ltLBpTP)a1vbTecO{|rLFN}?o->|j852$ z8@UH?V6AgR!F*i2eTW_TVz;itppSo;|VEPDNiB{AtK#f+IAJ?>!rPk;KO-hLB0wfGa9#!LT<9KW)k-Eroe+j z55X&Y+LYX|5lq&p2c!=Y-35r2bYjiYtuu3ZIz$+^9LG%*h16MuiiaFVig$>LsvMht zZq-{zenQ*F9%#(G=p_#L%V^SxSrusqA@k6jF~3m=rrP15flfgGbR$qaeCHbk)KsCp zvPZ9Z!o<#>-9N&wGcC4D=S^r+jz3fa*kv+5lnmLkF{x=eZ^%1Nt${be-lV1sNGs1H z=)52!2YYcSM2g&c&yJ(d&`WibErDG5CNJ6OA*+BpBIO$&|AeJg11ae%euUpcZN_bI z)I;S1`I)nNzrYliLdE3)+{qQG!Fq!QbOUh5?@tlz8IGl2< zKKe`nk^T15mN~Tbm%y+01NcKw5)s;ngX7q&zP_eU+xMCc0?WhT)QM$pf_L>{8(;N) zzQk&|0eDmWtAWby7#(u3bW@5V^TAR-&gFyoGg}3C3GZ+HC06wkl3ugTR|58JArAh6 zS}fTCp&Uw{|^ z5ufI-1kNI0gPOK6h#hbIh%%ar8cMuBpRNx~B^%qo#;5=wBX4Yt$~?~~zJ6p9M%$Ko zqH3kIqUcVD`0`%Wyr?_5X}k@1XpnNUvrJ^b=XI~@ytThNcLNYpNBI=>+1`CB7Q6hB zptSeE6B!NyWLP&DJ(*m2838-}V6Ro{8D+t0#Y%5$LChL%K5q^9a>~gwp~R2VheK8= z=m8Fe3k+?wMosP-cjshQ!cp&O@be!-F9$Xkh&#ItH(#e_ObXSC#;LkQJ|lDTPJZ5$ zK3Cb*(#&20RD&FLR1~AX5Y6JSns>eQjaSmx?x5H9f;p57T_{eTD+z7oJmjfQI$FV- zmeepOtxs>xuI#)|1PFYY9H36tspFwa&2AHpbr*O`WWYLQP20vQNWET>AvwmgpJ|;A zDO|`{m+7z~11GK-Sk7WNF#r6D4866#=vV}|bQG1Z*yy$E22TPMKP+c+8&bYq459Zn z+u|XBZxFra32z79YO{gGPpwUf7@PeO*yC}{Z<7<#o!(^ZB$*U!m};wM^zc)4K0D9& zcB0UIBCop2Du`J`}yY)ZW^62Qv{HQ)oH2vmP0%-ehsj z94Hwga8{8#=mEPsD-!l%a4?cU(*~Y8%fho3&mA+aIii;i_~`|61fiFLFro$b^Y8JR zYF1=m{+@pOv~xFDcO@MicJ{boy&23Bpw~jB6=`XY?^hyz-?{Vr70dcxPGP`3xD>w) z3l}={ZT;xJZl<9V5E`en~Y909)-xove=M0@o2+i86_m`@^dwz4|xo{yHE4LD2 zOF|?=1_;gg`vC_Gh}`3G#GRclffL|&EQHZrzk9iD6?`iimV>Q6^lskl9olRevmw&vQ*E26AA_RPhuCqUU-yVI=f>u>Dg z#NAEk=Wc`}_q~edHZQFDdk>0LjY0Ks3pp!5&uLtZugv9`aQiqvgPybnTI`K;^C(4-|LFZ z`v!InrO5+uN$N4PMKkMVFD`X8(%D_OBFqHN#H4gsY<=xpis&q#M%@mAYbrrgWfdQi zQxK3Qv0s*~W(6|T?m@FSRNiK#sKKiPco8vSdBc}x4u6@#c}iP71J_7Yx;K>`k`K@@ zcUMY1%5w5Y*|BIa-8(xU&EHi%wbA=Mp&Mj>6x7kZ|IwVY!5Yjd@;#`&cP0-i%wbfr zM_uZEs~@L!YK+JM9C#97b>1FyTT!v`5Y1e;0kEEH%Ex4|W@(vb)(JV_u_5~adCGIO z`7&n05xwpY=tpSoU5QY^{-l3@z^XKXcs>~GPN(>!FTCxn^O%KaeEn=9Rz4E5 zEJ3ScF85f=xhbYe6ZW9=^C3|fX7&ht7Aw#vJldk_B;+vjd6zo=pNgpe~C1Jto zhv9BRAp)-L{@n@bQcQ_M4eb{zwq+5<>-Hjvj9xJ@-Nt@c*q|bbDYv!x0hZ8fsM6IGkSm`|I-+|E0Tv{^ky3`(MlOws0Ic0^VYN|_WF~BIai}h_*;z*Y z)hp5vAh934-=Iwvn21{Cc@;nrV7iic>=^AwW%-*re>7==)<67Zqdu40+uJYZQn{xx z28}EA0C|PMW3>hssaaxTgBl?c`?l$q9z(u*71enDB+oXJEQG?Cr5)}=#q{=I zaU*bBgjfjbtuT=3{xzyOWPwhw3avdHqXPy(L*7yP4oGA!raR=D*iR6fvm$D-f77&riyv)!lWB z=999B(04{(by>^?o}P!2Hz3=M06hTL;08jpqS-HvZ?6uY7%(lqBldJ6k$*aRKizC) zUVCt7vlSf4{jGRLc)5lU=*i6$--h1LFvGHT(2;?J1%P_4GF~g%v$1RMBta@A$aBap zNlIUN8rC7XMlpO6<;s^4J*bkw%Yb)P^$iik7PEvJ`cniVhP62DIKvrVKoe|#R_ON{>D~t{0mNU&)cO|Et z$8Le*4xSb8SnH)WyQ%LGU!A`Z|GY%G+La~&Gpxj8Ns&>Mmmsz9WTN0r%TlE2Pl|2R z_hDaX!HZ143T$3_uL{}Bsj2*c)>2wB<@r=~wDyUr0e-^r*+)JDFhK<-21nI%m+L-& z^99NF?g_pW9DvRH16SEz7b*&$!eliMwWWIm=DT#$b==g2=+_zN@$Q-gX-ssp9hv2D zTwV6Y2d9@RXVw= z0-yF?!+$RXP_rHDT$=RGaFdoHN2~SWG z0h5v!nv1n1(NvjwM^;Es%!W#imRW|&LY!xg;uyWUOvhg&GWd_G_4}`~86*X$cpn-0 zP%ws^nf_6bo8=+{*|-;SdS@>{e3A+@49>JuLFt-rU#`C5eP+EAT0t&uZxaNsAiA{S`y?6x&z zoruMTYMrjfe1t|<*~z=#F6`8bBu2IC;iCzExrI*Yx3gUj1C=nOWJ!>(3qcMDDpWx+ zJEemqN-Ns-`u9)ZMt#k* zrLX7ogjEzu>9nxK%mf>-0S|m}QS)?&7SmXgT+pP1iY-ffsH(GEuiBNA{p)b#U~r{R z=^Ys<#$v^GR&Xj_<}73CEo*?hKF@@ojM&$ic3L}w90_VfqV4sO?zVRN9)6LAdeJ(( z^Nh^4Zz-Sgt<_OSn&px3o3>0v?el3KL!Gg8(~7{?!$DLkS;8~3OL|{F<<|e!`cQbQ zz&QLSuL5Kdt&QASLE4~3I%r)=I@)rbk=Yl7HWvjuJG8agH1Pc`#y;GDSN(RTM_||8 
zPOFN0ca)!@$$g9fhgBBjqe8d$RtAn!}rIi>fUdj~0#gDXdhfvJu9yUqo&!$oNQ>u=&Ib7zMvo?XfLOdD2&;)}03-n?QpM-OW%f#%|o!-IXWQdj-kG(U+`^y_v_ld_!I) zga8^e2j!DG72EZddFed(f@=Yr)Tc@dGnzhe9mhYlD7%k%M)HN>Pwb znAoo$XIbGJ(K44dR-1TGca%o!KJIGQ6^;V7j-!l4dHEj7^Wwb@@DA z65z)yw}*2XVArSn6JOm7_idLpKi$ezsdVYO^bsBV;U*Bo@3)rnoZCk>_=s~wKxnwS z`_$O==hshykXG-=8b~KHykNd^EAJkf{uj^{NHkb2hg<5@v%0D$y>fl@XzDe5*~pigylS5ABE&SQm}PD4`ddEhRDh9EcYlp3evWTk*zosWS#GMZYKg3 zdTZJfft{TNQWgVBHi!Vd{N>ST>#;BZ5O~duoeQuv)Of)R@bOwq_#9c@;W)8?#TsQPvk_|#ehuHqs#S^G! zP0!KR0N1@H?4Db~g#Q5mZ5Rd)j!sJNE%-YaS45r=w-iz9=;Go+q-R6V7cSW9mtxM0ulqzs zqqzLXU3|9KLdDxulmI@5mErF*-zJr+*p537V@{|;p9{cY02^(9It0JJts=0C_9VwrNZ40(#}vSuNjekvPxF zLT$slw??-%b=WU`SSFAk2*cDHZHr0CMbVCQE z64n|wyhA^qA4a-SV-`|aCKN?z5onSERwF-DUjxxPDpr{TL~#9~Y(kqyAPM{iA!yOvQs zOIDG)D^LkJVBW3~#h8-ckaBm~v(EXNk?b5|e=HL`hTSLC&uRpx4U>kDK2Mfw>ps2h zEe@KAv@>&06fYsF-_fCCg|nYTpoPTVi5nyqKP1#*e7|s9#%WSeJeV8J}r23E+c{t&A#>oI0 zHHd<`wTEFsC36!4-%;#Ydd>2^=r?O5A9ar_R8NP04M6e=3@T|m(!%OC@#I23s*A%V zVURr)lKKJ&b<*ZuoCiSpj?6;|TPHh?4r-*I3LRkzG4WITc8;D}<#1-sw`|Jc`FRn^ zgmSD}F)^lr8Yf=`-2h%juOCwY_6E7W`GRT__RZgOA7m+6$*{OeL2`r64J0U#R^*#_ z1a~&jgle?B>PRS_5hkWk;*IMfq;$R&N5eUoyQ!&>vC2IDpIx-}DQ@<5fZJummBl6J zh)il1&pihc9|B$7jNalNbgSO8L%cW)Pr&xELl);YKu&M*1)G^&jz6Jga21%%IprRYy)3qynY+`WnJMg%hJs+tu#5-GJa2g=&|# zWn$f}F*>s}_@Minq8dgA>W5aB=PEVYYkxslEE?h_M9)IV6SwZ~bUh&E?zAj>bGK5Usy8xt+{*~$F!N{`L zkKrH>go(^&Wu5BvOMKw$f+4B&e1~T*PBIZxEtoxyHaj3;f}p}35e|8y9PWuFp$qY> z*@3PUt(PQp&2Obe;f6VraA{2J!OO`sYU=O5(B|S#81598Q`E(e8;7?fD;K4Tv3cs8 z8Vnf3$a0AN#@eCZ>04Kq9?NN-vN^|065#$9ALTt)^Sjt&BGo|-)WCUL%rN|X^)Y#K z9Gxb*jvz8z+xP28kp?;#c>yY(F!)>o-<9JAcZh;<*rBuXx?48?{EV#2 zm)?Kvmixy~83rPM`OKfgl;xPCqxj2^{{Bss3jgfr`}f}w1v^1n=-+Pf|F?xH`Cp!d ztAzcqBfjg;mgW%wA>cwLR29g6mIA4*tFjT=?Ivm(#E{i1gAiP=?*)y&nxxE7 z*Z|CFaEfvM?fUvhpA(^YvPZ`d9z!gJcWx+<;duS_xo2q_a*8tdJo*SvkfLQlE>2rd z`og+SJ2Dp4GBBgaAy#y9NKX0ZRfc-foBu6_`qy1{#Q9%%ssFQES7_Zi8GGEF2#<6< zusPkq^bh<0TLah+8NAJ_K%{c04ic4lk66K7|L2bo+Bus1+lBC3_n1GLcmL}Y6OCm3 r#XtT1UN_5Z{@vhZKlA+$-4;)IaB*e|3t_UYjZcR~zePb#4#|d&g{5|D84J!QG+bQ5$cSVggvuRT)8Ft} zPELXz6BE-CuYg9w4fz8n2vNC8J%2k=cA^AtNe7K2PfsW3u8ZZGJ1NyT|24gD{V12Y zc6f}Lb_Va~eix-u_lwIpvGbk%MpO|Jzoz08Y7ikbo^PF!oFZ*UB0`To{%vZcxAZ}% z5LpHUpUE?9ILk_^o<`1FaH87m)K@8i8!Zj&GiJs6{xPo@UI-PHpW*x4FC04gKJqOu z;>^wmsONCNvS9I)9b;^A^pk<>$Y0N&a?tVy3NVr$Kdm#mU|((H9U1Nq&2X5l37W~! 
zH=Hpi-G+bmQ7WmwF6qk-4%8uKp%Z<)x~|kV{?SY8BKMh%6vhUHz{Y!h^68+5D)@XU zOqhZq-wBzE%W>4_d%xi1IFBTVyt^tmhiMt^$?Svk0k;=fh6o1!CiW;2*I2$57WuL(#fzeYkwIM`^KblC`wNSdY30%L?w_Idb6e?-#b$LRyU zDI(aY=j0pa+Rmx2R^k%8z^aGyPA$KcVMwr7WxjMYA*$tLm$cXRE{H73Tu_Tyhc|2TBV zwU$qV&;6F171eQrXe)t1Sw3Ax1Thi;lhJ`<<9@M`0oVDFpYLZGrwC0Em!043G}9@Z zL_B=MUC7Voab7{1Us3tQI$IO8dFfp;plLw4MwYO?`{ zq0s6bfhM_TkIfO4M@UN*(AD-*en?z*B(w)B1%c*xw@k`J;2qyBX{?PUEY!5!?<#G> zwdQale2U_f+SuD&vJmL&FR!_ZthK=k@j+#STC>7kC<-&ot^QaRZgX2Zchd97j4KR; zPPIy5Dcd@D%KISPvlnVq7&C9{=Cc?l8%7(HWpq_H8`WY>t*_cmiy;sZ$pp_kbo1+r zbTUp-=5ZJ(fc5)8LTEU}Ln!bx$38-_H8guz9em7b#G+9LG= zM*>alhauqYdKHr|lNFQj5V_{d$w6#X1FJ#otp3`Lu#2Z=E(l+Cj!})bd_=IaZYi~p zs_9>X384?z%#h?`vibmoC>>}|{`Bjk%H?$qeE!&Qm27{=BD^h%@Hq0M6~y%Q$v`=E z{$e6=Q1*K~M=;_ys*8HLe(MuHxw4{;fa&Z2S6cgMmyd$eqStgbq>R@ocPt8zKfOwR zES-kvbKId4>|!^)j*TpABqD6M`x*}Ld=9-3ZWiEi{k1Hc^qy~snI!;N-^-N5PY;gqSBM2n-ftk|jq{`9s{ERceN=X^keXImt;M}x5 zXc9uouaSDq!~KG?^VKi!cP72L7R=YDsS}2|@J$JUk-HgJvsOvlL5~99Y70&LN=w?T zHK^q%gnyDUd0d*Lr=BZyWo%fSeG|KEn6$Htjm&0j==f)?VN7J?pWEBB&30%myVz}8L}$xanR+8MlfztN74ts-)FQ~O z3Oi^kbimo&{U?`x?Xk!4XzAxSrTVfz$Y9`DGC*#6mI#CzbJt&itKd#z9A6Ce)+y5au;~99rZgLk@Z#-g7WU zN^%GV;P+nQ+05?Cf#YOe8~41|%PFiNi|550`3?DYGZ}<5vhd#GD2zqiCe{bHdv#@4 zR7D?F`%!V|*TDf2%;KdXmQA0@MXSxsTf`U-Lv1z6QE9Qh>A5ke0p8oG@5LEAM#WyB%Xp*Y7{vGqk7z_~3Mfs0?VP zblZNfF2~8%Vjr@s`uRqwQx}Oqtr(0e zZX#mW9}STS4Qm$+H!r**@uu3RmX2GE6Ubi=D}vOOAu)o|1j=6XcLHWG>xW~drKKf~ zylu}+Pf!1N_lORFlzfj2Q9TRAD}o{5O1n+)4T}44ggwmdWBdkDo4VkSUcqic@8xb_ zBJip;g2FX~&?yA#p@}gul8(i3)xDRNaW24ESyJm@dco#Rcpwem`5AVmmzD%GG&y~9 zCP$eoZu;!Wixo}LSAy&H&~BYkVk5laMTaETCY_{bw_8YJE$O8c@$c#v>qN_gav`~3 zpd4|GRhoKEm44&b?`FHk0N)gx&Dt)4IUQ{-7BCOxBW*Q@!qg~Qi_Tx31*TBN6k#d# zpOXX0r-Kc7f$9!hns&22g4dpgGJ{lmgUlePK<===vsp!q9};cZ9~OE|Q1E1?`wO-i zj12?9k`T(?JO-e5tUAqSW>&Z56=sQJ3bNrGBsBr)LF#uTL~HW<+cFAMO7N!JTL13B z7ef=|r(2faig}YqvIH8O7|U8n6OeaD>~1$e=adyFeb#hL9h%|jiiM~2WVl&30+B4{ zC1@GSf^ac-ro0fGg&;??FfDH%GMiaUpS}(lGSj2ty{6kIW{}n-hmMZ)zjVW6Y8#%mtI`sBn1p66#SKY&-hkzGjk=hr zIO+(S?~|(W$7{0ECr4?0^XsYBOELQPPotrA=EsJbFaXQ@J-L4RS=~Ft9CXbG0i@1H zXz^x!JsDZcvXuR_X?yCjTyv;0SR5i+xFDGTL`pcI9{i(v%)W=KNkM zm?sAi*2Gde&28=d4NPcQ6CWdrI*vrx`W{H=B!et{{YCL}o-k!8i07m1Z$usJ3_ z_2|{XJVfqyM2q(6_Jc@mx0>>p1u0;;4{6arg)2)^nja1;9v3lI?!ewP72RXz5tbY7 z^ci;=mhvNfYTw-q>n$^NoT!xg*#pV{GRzaq%9^=5K}==ul%9>jZnpX^)iYfX;5tNEx=wPFQ%~A z1hH$V+stv#V3wYh?@t!wTm+LAQmeqXsvJh?_nBciNG0ydyWv?wMH~U^Tg+p|bB(0x zdKWSjNTRI~cTR1Wuaammb_UX>rDXH%Jl$1UYF2)_%4T(!m8Dwf&|}@H9Hf=DH~`8z zb}+zSPV&9?F4@nCVimVqfPPEFBCqCaN}W=U^``hRT7S_<{y}n^(~D^z=B7c8`4s7k z02>aU$mV(vhpLxw7b1VyKRHl7LL5esQ>XR0^28%Ui{LwhP)%@Pz4TrRt!PKR$Ythd z-$|7HombD|e5~DI&su(;j7L^JW>A&VL5v&b*^)kd_R(u=}P{A4iYU zf%ekajJu}ZU{?Yu6zbVfpjdGYRp}lUs@3swdqOdO7N#eGDWoIs zUhhWP!foNA!Uc=sJ4=m#h zW`~BtFjXFQ9lwXY)0xA0uU%(n2A^CU4*g!efaHd=8$1(W36y-92cKHV`Fjf2ZZc1R zg^bh^cwgTMbDzupei5)*p}*XUX)S7T8D1)QZ_nrKv+#{lAQMb5* z_R!y%@+&G$KqZyVG6rRJ1f)bCuYlhY!nZ>~r!zafhomoCH?Dy1{)yxY#yV9PN`K= z1T|7MGYLMHaN z;3%#O5hJ!KBhxsAv6}bSlYW}=I(oxcTop`j;J!?jB-%XIJQEIsU}6$3zHz;^vb54DVx$=LzO3oTL;n2Rabl znbIlOq#^UZIb&}L>4a_rHzqeON;V9?lc8h_4599pQj zy)N{bikfjf(}1vL3fVD_5~41PmFQUegt`c zA{0D=;)&#q5N7L{EmYmCb6wpTHsGBoIOyO3L@j9pm+DwvCI!j$zDPnKCNu;FkkjY? 
z#~#KiHBNSuggmkDSI;64N@8n$XC_~ZeyiZfcsofWBrJWPJi|QE1369iDVPgfqtVI z^?SrEd!J*p<)Fk=5*08|w%+cF}n=gT*<7)Q<@i>A8Z1mp7> z-j-{08wAx)9P^6UT0AA9hPsqlL^wwrBg=SHO%%8B!OM~XWXAJN7>>ZBQ5Dy7k$qej zlK)1`QN+*j`WXJ0_2lW1MV!~K0kvs9L*v*we&GZioy#CoteL}~#SD$`yFUxu^WiO_ z4Fh*S9LEYg;aNU5+GcS*k=B2Mu#B$dW}YoOYiEiLXu7z7`EMPH&$mPTj|TRF-3!y{ zK{1gjG31(d#XhmtJs*|=5x)tASDL%?4MEFj%PFz)KYz5B>iL5dv92IkVgIphn5~+A zBDw*R6jJH+0@zC~RPnI4H zalf`ReI+I7^IZD$|G3-jv=CkhlN=Y8i*#*lP_b#n?yt<*cA6PKy4K7^=)VzPdZ>mvzo{>l2 zxSnk5)63W+VDvOAAI#y)I#x=iWUeN$Z=ONcSm7YDkMmjiv&Cj6_uw$*NXzJO&7?7< zI{z}_J?WEUdMshlky^^gObTqZr{Oaxg>v){QW)kAu{RE7tR;bYyLvN{9>tfW!94;+ zgzh^TbGVMIYE8!P{!K3lYv^b}1qB7=<+MBx=6=pk0ievFO)VYby!+c{{kfxkW(Ts?tx52;>n?%zUyR}HKYz_^P&W(va=B~Z*%=b&7=M2}pdAT@nTngq z+98bnofcZvxIYmIs$Z^=r!cP`t?g2g&h^ocX(v}PXTjmz@?oy3E;FxausggYQA-wY zSH>gh47o~LyZEvW#SEeI>j9QK9EpoI5qttQsD%m5bfIL9ed2a>XRJcWbofpkWgnt{ zfWbO&EWBoB$V2G;nR6vA>eGNFgqing&vs|?L2A%at9VLWnJof(6Fw*U;_&1JKR?cbG zf)Cua<}CRB>7NoE_TF3ewkqRqTYpfzC5j1lCAc!haCSDs&e=iRzcWmo-f2Z#!Ay5g zo=##BGh!VDi9Dx2Dz|e+c2gKp`(%ZpUlT@=0Q!#2yEdL-T2D|i2rbcUfy5I#vL@vh zyNZs$gV(p97P|G52lk}+?JOw`7|gwC*hT9j7HFg#Ps~(k804xFND9;z0n86y-*}IL zTAm3)>`NccpcP?b@YRri*Av0O{02wuaj+M~O3H#|T4V+Zcj)o56Th4|I^co)*G-6& zb;Mk>jn+lyQGplJwI=F*2y8r9+L3{JE@fWTlp6M1(cK=JGc>xEldr1Dfw_H9qhK7v zIO<#Dc0)A(I}Zq9z3rEHy$7%@fKzITUW9eWMm?UwyOwthe>`R{i( zVhK^5LWjTP%^gLnRC~5z_w0BBB!CSSrVmy`!(f#BclUs{qJ~6gnTIc9b?pBmrrnNP zB9`AC0)%uMvX17dbTDBK2Jx!LN)n0xN_Hvxx9)0Hpd`tXa{qnc z<35Q?L6#Vh(c~QTO8<|*!p)d&>dzqk-!C!~Gx>bJ%WClIZtlEZ0Hv~DwpLQYgPa)@ zIPb{R3@6NSwNZ}vZ7Xhfnd7BCqAJyt6a`{F#=b->UrHw1YFg5obK_G~HD;Ab>iW{= z_2elm)UeD*iV~QX!p)GA@;klZtl1uzjFjWByYgegTHD}PO!sD7&bkA4_t3Opf*$K( z8Lvhep1i1&j4#*z2oiFFBy1h9;X1+;2mo)Vm%F1=I0ZB&!0YR4YHI4)g+I%7w`yu? zQwyz%zoffYMJXvIKYa>4`*b~R1)>5#a2=ekGy=R|*VhdeEZu&{y1R2T%`(29ot{=$ zKOK+4<|oQ3n)THo;%pl6Cqnxm_?;MOwz`r$t7yBtqjc@aZLKlUm= z=Pgs#1CKMB%PzsuUisq6ZR46`Ok>RW@pNPK_{t}CUI9DkJaq|!*H22aPL+RscY3;r zTpFt*+ID^X=ipp!k*2q}qr}LHf`V{$pr>}Cq9Oy?R$rXuVR>(E4zmvQ=gp=4bnJ2? 
z|8^Z|B5yS$%!u@Y^6uF@G#h3lw--hikLO=L7J}kR|l@R33TEcr|X`S^g{ zc|GKyc&{MdomZ*q53V{4OH4qGHLp<>U>s|fro?#I%!uV+S~ zwnIu>k331j-f@C*=oCsF3zlOuE?Rn`p{k-uor{AI3s`IbA{X=F1|bqy!n7jFx-tpw zG-OVR{0UYw;THyZ4&K)8Rv|qu*BhPLDRtU|P#eRa^DpVgw~4({;<~=MfZ30Q zEo2ao_%8F`NZg7;;6*b%d99)Fyo+KE==ryv@Qh zOcxhHj*Nbq?94fiplmC8q_|cjPKU04<=7+sH_-Teeji-OpNVsppmd-gV4Ly4hmh^u z9{_jC3uPj-hkkF;Aw>;ke_(A836@k)AfdYV7lfJMgSZ2YRcwDlPuEP3)kKM@*V)QM z-koE=ADNFZUWM)bp++mFBWP7iFm4qCnpQ+W#T}?^LvbljuRGAj%57mlA27-K7{M!s zac#icLi7TcB!C&j0|mrIA?t*&tEM*^y$>5zZ`}(&JY|29bZ6KJroDZeu|Hklen0TI zSk1B_l|D+7I7|{jPQJIFSQa&DQvTHA%MCDIP?y;9d$`jxyFJdR?Xm6B z9ulJw1{qR$uf}IKxgCB`WE{DPWcq+=2#78JQH?57m!R8{<=!L%S#$eCNPyN-aHXZ2 z_TwsEimK?!D?b-zzzvwTUQh)wWjen;-D*N^S1AdF zw8(xtvJtpJ50G%fNz24?H39pOM9|YnWdJdwpE_!?!p~>gl0SoxylE1zCm9ISH@xKp zlyB5QoT$BT6Xqjk9ngXQq5-SQby3o7;+orQN1E5ay@C2Kpy|D=gT^azo3HuGcswmz zWFX_-IhG#*`sQ%iBAm~xN-jo*zYK-0{}bTV`li&e8(q)@%g?YF=PCGR{M8RIUrhN8 z0!0LlSn?lTFBSQM_fml}wP!rH1`hEi6;d}{d~m@f4Q=&I}m*nN44 zD%YyrH@wHi#Q(s&OLf+^D`avys7I&${5%%xBP(VhL5d}*J?inBI?khC1Z050kd5ri zm@P<_1-6wS_DHFj9IAhn>JZU=Md8wlX8vDBNAOi)gGXA92G=|O+U?oYpycG_DUH{y z&EAF6yHrgrEw%TsV$T=JM#rt5xHtUw?Yc)RIB(X;+*>H8Zx5Z89Wu<2Au~b9KFC?5 zBYz3&NUdTJap@<$*@(?(8`V~0&y3{7dNVr#_xCNffwtQbP0zXSAY7ABJ%}8ll=L1{$4!^}aqr2nEBQ;}cpJZ!5r`$_(R~u1aDyQ}3fq$;^*X&397N zE5DAchO%AjD3K#obLOn5@&g-29}*3Z#1cP_iXlU+iCv?CA;yN3mJe1Bq8^TH%(`-1 zkn#j{2A3$!D_0+^7Xvj7ySIb^uzT|k+yt|Q3yLX7aegu2PilwJgSzrIuy@IuE{mDi zPTCBELMT(P+Jm`*uL^7!{~fgKQ>q~!sNkW#4gE?pUE~WCz8U|0{1t_G`U|5Nded#- z>@v(fjvZ9oXXbBGjetfD&fLf7$G!5n1E}&qu{z~dNM@is#N1~>ZEN0XyU&xkd%fo- zn?AZtkd5BU z4BwSM;Y`p0R4G77CE60%aJJe>jSH?fn#K`Wnc%+k@#;J)b%0PD#}Ivpu*10)o?aAitg`sL)TfIMIfcSitvek>MA<;gr+02D zzy`!~dKLLVuxIWl!HmPvCn0Ddq8nIKIz-IGUFvHsWs)C#tlN97KtB*6c1!^2hd21mF+&;m?{ zu}9|JeXoqh#V~azu3#5($C9R5l@bf+ktTIuCX$C*T7yv zG*)av6krJgf$NH2XC1KW?O$kTxx-$ubnNQ#F-nPKh^6XT-@=mLwp@YwULLs#zM-?K zJ$bwx_e6EqFf#Vsp(C*!%u=5>NI#x4u{oXB9Uf(+8V6}X?m?T}GhZQpmkS2{AX_^y z$l0__Oj6fWgGZ&$i~RtYBZI_6Wn;kJh7lV8vjOQ`^sEPJ+&sN81t-XViS?=m&;!A5 z!vAlyonNWbL;48&SSy-e{64%*g!?N!lN5_aUo|T9iPaf)rf1psH@3D~mBBRU@H%p+ zPdlcifvg+ZErIOKjaV+meTct*8h?#=tp9$cBcg@m3WPR;k3rBccoFl}- z&f5){ksdOk!&>YN%`yh43bDcFmzyE%*8}*kbqEohmfS7}1H!G6J=48nGtS0+xw|6Y z{lEJ>n~HQI6lD}yj)knk`qXwl)#j%=t-Ed>+@S9%JlQNcT(=l!Wqr-f-X+1h9G+VB zSdb8pqlPB|+wVL!>Vy*|Phcz@TMP4PkD z0r!M>u&3j@E_~8IZbTw0lev(}!Pv_UZqh`mUZ@_UoKoD%4+ff$0$5(kaTMXDN1eRL z4}ju7hN8)&Qc@GqIr)B;sOEnKjGi64Ahci0(*0zn$ue;q&d7sN!-u4* z?$*=w*=4-^wYDYZHsfpSqy_otN06mF2KdnGz*sa*upnJh{2CgDErC6&O%0Y)07#RM zfK735?mFB3SNI{QhV%`B%|;%pAqOpoUO{v#27d2_KL`X8k%1goHkY-c*Im@`b!fs$ zs{W=OF0{4PzTXH=fqtgfGtf!An<~tZExlHvvfc5uANHZoz(zV<&7Ym`xIp;Tb3+`S zeqq=gv0~=}SMEmxb~$xaiHh16o@Wi&SiwvCz0a;8Vm@t2=)XHKvJ4e*)0WVWZORZ3 z_rjs#=2W^4$taz*iEVZsMDm|1s7*k89F0^XkG2d<;}MC!{XWZ?_e0)E?I()H{G=^% zz*A?2Bm<=;*!%%7)L5CwKB}C}j%Pyd8moTPj1kP4VyO!vT8xvdr~%t+uaVH*;hAop z^M(1jrSRF8xQYgeQI=Fd;}$0rE+WKLY%y*G$RLkza$$o!-E9*us{&wW8x@_^jV`YE zuXXLzvO0P1M9m>kaU%`3Q`dT(D=Kaz}U-|7G178yD zzm$VB*Mam4;(36ktsG2=j?~u}<64XB)I`9XI+z6T*nsyRW0+32zsO1d(IJ_kx&seK z=+fb!#qjT$r`bV9jp<5A3MVxcg!h3j zYnez~cYx;LeA%Hmd^)hOM5?M1Oi3LZXPL``5hFIg7}Q^Zs&;&bjzgI4y5jtS&%iEn- zM^h8m`+}RQFFGdX_y_Yl{Fw|KVOD%R+NWG3w+t3T6`<6oPl&nUdjWt7?Rwk3g#!BUNoskw2Pwko>z4xPfBRvycgG{ zM@fzQAiSM)4kv4nZ7iybWRSFV+sW}caAi=k{KDWszW=ZW75wrcVkueF>=yE40jXIR zBXDkbUYy`Z8iYpGh8eL+FXX}%hk;qmTRzlCvvcxo-_BYrL#Jrw^zAQj0Yahj1t=Yz z*ueFl2zwGt6vD3_Y{N-^2xT3)xsga6Bm|6p=Zopw<0dx%tn|b$)&yJ~DB|K_%e`6mG@BMIamQsA5CE!ycL@fx< z0Md0N%tLvGgibEt;WUi>F&7PA7K)j>H;oA zMp&Krk1?GMY=XB;U`7crnWI|xwKYQsDtApUd0z{FQ+L=N(f)QMMX zq(*K)&J+!&9{GP?rMZ|tDp_M^Mc}T3HS7LTF$h;7#>s%?-ChYGHd?bvEtIs*?Qh1A 
zWxe8e!-4h2gh6WZ@^|BfCvJ3g-H6oaKqtA?K}OhO7G&gi#s3XdXp%R4 ztgBR@=Emt4>lrWEDTexX7W=bSvf}81%kuv%M7@7ajL2sR-_!A$%FD~a^`)Ggk4#{e z2Fl4d(Ur>a&G&vyiH=5+3oLBK0D4}kOS2AJQ$SXr^7NDJ?(FRC@6RgS0(ktrFGc%6 z;)Rofd|WzGg=J`XXrzjU5D~G#D2QVbFi5PS2w|8~C|F~tkDw6rM-6HKE($|XcX0~x zGZrT1?Ch*Kq$4}_9Z2H2xH<`lot+)nOZ_;*PuGiW`3WH1E1O{qvQ2x)JN>RZ?iEAo zA|EUvr<=m*=n|nT-;GFdWQaTT-EcGonW0neX7w^3u=s6peeX5DC+0qYoM6|N+G5Y0 z*br?2W3`24#W4yo=aQG|4?;q4&rMKfBmIN-7x#!6!+eZSIUW?FG2hG*40~iS^uMEj zJNXcoy?-A_cDL^3-I)$zZybqs4gP?ct-2i(AT@TZJ7$v;Ej!-+T%*p_qRP===v+;9 zvwi46+82cHB(DP6*tq^;G(F-0 z?Dxb7+&M%kiCgQAn`aR`QynPm}x|FGYYm)khux6t2-_2Ym z3P^~xxI$#iq}qOIWaJNU4Dw<%Z52If&-@vmwz1SM?43kkTc84W0O?So!3A-)ehB2} zjWYN0UEsXJ%~F9z5O}5TuaFz@)k)G;?=K31G)~q*!?z4CH|2-GYD#5%Ec5RPObIjd zIVaxM7<{X#F-TlgF+*j=Y8p8p&vZ>$?u|K622JQoNAg#AvZvbCfd+s5f?_D>1U7DA zRtJ03D%mowDEjBqQq?Qw!uV|qi{m#TC!MQh%x>teRZ=vkq&L}{R(v*go)paSM&-r! zoGn|2paML=4Z&;_7xYxrWX~o^?VNW|Tw%?n5PjOCS{N9a^W#GlctI4$cjL9y%s7cv z((~KPxYb4_qTjBh6`saKL+v(~r?jMS*<}IXQ>c4BPtA027{=`#n{KlJ2&_$lJqlVt zE`Ku5F2;GW)aGSWrVwODTp%#q9AL!}J5pMjHmVQ?&E<1@lP@MuD#-UWwv;|gQhp%C z{;*}_A%Xbx@+=(;$IFK-a6E5e)k#=jP(~=!q4;`?S68qR9Gn6yMn;qa%`{tYOr;G8 z-|pf0bYQK_G#`U|0~qXykfxn#WkbSMFMjUpgK~d0M4a2v!9#HfHPzQ>@JrKWYCw19*Y83#rLEwI^3Rp0s*lLcTw@8X=4|Vh(k4zAE_@43OeFSzn_# zC$E*v-SruCrCwcs#2qzT_^627@fi39?Q@HZABrFL^1ahP-H_xF-4N&eP0XTeCr!bF zvx-?^C83>L7F==Bq3#6&iKP_~e^5S9a1{9VGIU4G_{Nb6;tSC{nVCY~`44Pl4_WVh zs!ldm0Wl_s4i^2S3To};VFqwH1J(zTZnAqqM@dbyGVkg4kF`IGS=yKmLd1(`v!o0V z-gL<6D*Cl~tftxjjP{hCgWJh@BbSt1*q&TC3`RQOLhoW|bW~c@ zp-P6VtM@o#gaN}T#jNdgzwIxDf*agRWF+sELK!-MxTg)2gMau?c+Bnf#AeNgTRV}M zeo&v_B3;Ad3A_#m7^dQb!{se?&c7=A_mBrWouNb-?!&x(Nrm_*{QkN7o{{6 zh5439c+@^}^*V{o>5jwYBgz&u3-&8rtFbW%gHH}5etRD|Dnx4~ht?(8!;h?i%%2$P zFRc`XbDwMg3tse{nYpgsRv#c|;V>n0$;rTlQiS=CJ}D5wZ&PgYl58F&n*$F9nxzo@ z>Zl%dsKPs=CXx1E$oqFbh=fH*lSdL)<>*J@+usE4_#tP@TqQEos4`RHn6 zUBeMKVzNkMy0`6Yj0D+l)H==#Bvz!yFZzh2jRSAAEJ$HA^=YZ8Z0Zuvl}mHMZ{`jn zBU7WJ5m`@OEuI%fmM00En+4nUR7cF7e`X9IzzXE$+m-OAJ>J;Y_FWM#NG&Zknh3j5 zn~}gO>}mwoY60EIaH1o%D?!uP{P;yyHl69*NS`=-9F)2&#`SXfD|y>?^EX z2O?NuZ~2k>X2e8<05x99V~el5kFL-T8nfERiJZpsTH+ZH!tg;eEE%pSKh=bgy~&x5 zS&zdR&6*HjANpY1>Vb12;<_5y%w&KkhE>XO!>gaMe@0RDk4KxIG%%}x!UuK+uIIQ^ z=_KAl?7nr56V^8BW6*6=j(>)nNO)sGj#x&V?W zncd-Xjl;a#2x;LXyaG*6R^FA08X~8`+Tzcp1qn=KFX+PI`#S*_5hCA^yIwyiz3;r< zL)<(w>a&Tn@;}szG&&4lmQ(T!DMpDjU8>Rh*5jSGqduBLxB9Rq$a%}m5TU7*LKwmN z6_9~H3y@ssQsg_b8&AlNhYr0^q?2Jj4idXoR0YA|_U(7h^J&qrosP`Hxvk{)!h1}s zo>qG!I1j$z>xcRvCzanby{J9<(4CvUXS>;rbuSz<3COGLjAul4MeEB*-nYm2jPar$ zo4APNrnkJh{qwKSK>JL>FJFNH1R9^c%uIsAgX>uVIyUqTJ421)ZD5 zScE|N2nqXjK1$VZm(4lkD>MNcz#6z^_UAs%U%qk^Z zMR`6r@-SDVa`bhw`d`ouVT5hSgfGXzZ+Lxr%d7(JM;V=^5hQ3vn3C<6oxv3PKecrZ8#VNOB}|@b$kUFp>Nn+&7VkjyNVW zyV+!o{i^HDP)~;~O!romRUd)3O8O-Zru&{cXKipLSv9~MmGd=ux_1kq<74k>!1!;` zF#Zd|{OOW%_BK1Su)p+ew*}x~F6Xk^G9qblWpkiU7+2T;{nBnSx;6kzRJ$v>>I^@g z(mPSa5Wirel&(P|;tD|Zy=9$R|0z{ub^6-cjiW(kWi%m<_~CKCGvQ-9>NyLwpuf74 zT0eX>U&0@{qw9;PHghYjA&X|)E%L{yTM|DD)~;v1uUFL{s#+a-wM9#wxJ&=Sth&l&V$7kI~ zo2XXy%*<=6Nj+e>S+OH>)@^eZH^NUDa(ftlJvmwW>zg`sPK>z;;CU|d1IVrzBR)Oz;3mcdYn)8njTQlQ}7w z`yM;*g<-6o6+5X2#rH$ahzz+L0iS)?Ri@WgrorC!1zjN89VkUD^;h2D;}<&TW9UuN zty=q3*V1Ig2w}@537w)uwQ!hj_SUy^EB|GKfGz&Npeq3?Ga6i&I$a{|T$j{hKV6k) z5T$>29g6#D`!H4l$M_CBvnAnBkO@}b2NpfNsX6VLZ!RMm>dK2w}m$bF{Uya;L z0vCQ?RWvOZXOJ~eF+GkoqO;`^2s%E7H~Crk=BG-4&KEGtjk+Q<1v(u?MX$>auHbiU zs6QO;N)=!!Z3GG&i(ntJ_V&9NOH+%tDQ5Rxnjs?TtuvbWpw6nRN|rQMI8Gcd_?9BfsW#?!J{NkIN}OuN?M$a?s;6E#<;W1G8gVLwdk3RP}0jO zA!_lMV4lL=)~thm#_yeczqf)!xcW?r0n2ydZD``5 zOm6Z&-B!GFGHSAZY9_vVs>J5&v03bbd+u@jZn1qS_asjSukM__3#MKNsGLFlr4iwZ 
zo_SO^ql3H^NJ4Sbb1_o$JbMD*985D(AQ#fcgCrSYzKM|C+O-S?HvPwzh7*1rz z_M!Quljk*9NSd=d&wZZR21nJq23F)Q2R`~UdN~TAbE5RRqn<;7&Qcfy7)36hD z($E>vTA1bBo{)0Vj}P^8jBoij6a=&E+&%ar+%mBc^_mbZ6dR+?1g&jMcn*H zYgyknoI>dJU@L%9T6Kn-E%1e!N>ug}6qHTew?ZJXmrKRt-*;tvLJg$t>DYCBo;AJ( ze;18`0^NM|V9miCOJ&MLu)kH^fZReSBU{ZloFO^y;e+&tZQ7_E~#sixT~KPgo+03RQFsh388(-=FDFlAN3e?Gu-pL zDoMo1ATI7Sl8rxyd9k22F$*8eT$OsYgGPbTZDjIK@jUs)BrMp{y3xUeKPNyl2zqg=V|$C3c}_3ZGU^wNntoICKV}a3cLRxdyzt4>;x=Pu z@mKV`XDEvpXCU!=&4~?hIlRC`o7?Y!6C&1?7%eX|b!{p!Mp$#AlwWx=oH02km1vW5 zTq?v|r-ny2y;cyWV5+2`Dopdbt&eDYH#qO*k%LVHFecy+VJ}B0Z?=;(BiW>n(bf-= zGuIqRz|1y~=Z*eD#gs3QYlKNvHG`jDf9z)K*jl zF>_jnBC6s+s@e$E+4+CSjZ7uOce1knz;^K@MoA{2#))g+qyASa%mw9ujCV-*ghz5a z{RJK^k-ssb^T{GPqNps&-Tkj6>YDR6kDvXHU4#?EXgNLiQrMm>pP=%21UOm(FRqIG zx=z|BdIr^2K~p6KmW^M0=6j&w+F5S`J=4P-V>Xgk{-h%Rs1XaX4$WyA;JL@e+oj7C zh|BG=S*YU8G`hM+p-jV+94jZQq+-OD)@SaRkM?-IKgcDbE7>0VEcX1MKr$l(wBnS} z{EbdSDOYkniW>0j)z})`nLc-w#Vq-OD9+YBqMUfRtB=ijB2tI} z;FJJh*@J5#WiThB$WMS?vLc5p88hnv&VFr&EeM-v(HaFs-YlhYZRS;d9FOgmWZ~Xr z9rp^?9}Ph?QigSZLHm;DtK*EV`6H&CEhln8Gm6$*TM_wydLyh+q++W$!it@TdXiOn$uT zgXu{W_baXkq;4d6T24#!^&nh}@B^JcJEn?y7vcE3e73LsjTH#M{oOYiSh2E~W+^$3 zB8sw!VAv-^iE0qV{xzLszur-G(}xB8is+xIxsDQD=M(ZZc6&{WRz}NfVp^AX(5C(u z9p!DDs|@nP&PHw8x&Bk&25FGYcJ6lNcKgNcM_M|TH+^Ir;m=_kN^Kt)5vNdc^OzTw z=@C5a_A7`3;ez026Ofc64D&#r2RrLQ9ht!$=JN_C+`rbgJb%F&F&15NEt95}rV;PL z>KupNy<}Arc5Gtqa+NL|YEq^8Yv!0fN>nmhAIi0N65aNPL)Dl?+u~^l`5l67 zy^?sio)@MkRW2h^hkZ~=TcF3RF#j%{nsIt`ac{H*{kUjJQ$G}`iq2dFv?y}cWIPL7 z&wOPH8);(|3K3}2+2-OSEBV7EfDh656Uwn+x=QHdTlu>vLyhDs?jb%LD81%Z|B?q_z#SqznPcrinz8@y^Cy7Lw7`GU$iGhLHEDoC78%8jARK zO-;quuqWY6Z(g+x*RG4Fv2UMn_{PNnzQtagc`2Cm2`;uiftLx1o5`%&2t7@3YG6bm z9#m~>WIB`R@F-dn&h?bWz(Bu=Ev_r1*dR_AYO|*41~`G+7Yf{Iy8D3@^L>>sP8FT4 zA;S~!bx{Sl!EfJuq?W#FgnjR=~UALSsC~d9$7SNx{)--xdkSulq$Oq zbaL8Pd%!d<(~5Vnt&ij6{t@TBH07Zo0O`D!?T`5}?Za^WXog6*k^{;puBl9HX(SHZ zUr3^Rpx0=<_B|)taMxe9(LHvC5$|Z9GO6QdsL6!XIEDzdSyt@+o%r1WwvrGoo;rVa zK)wt7dM)YO`3%f*@F5trX0x2j8mXc0Gp4au%avq+{OkGTr|?mu6fg70u!_>_@)Zxf z$I!A9Rn!=LaKqVBgpLm5nX3?BA=i6z;aZe>w#BU{)6<7-T;z4+)5tfDf&x^qU$#Uz z+_ef^wF+SlT6~go^_r7}dd*RgM%o(~U)W<67C%%TpceylOHiW+7;7?r*03YitjSYw z;X`!zeV5&6!)f`DZPKU%qHYZpGL2}kwA!?Hec}EaPXVBv}74P zz)7%Q$>LN^i&j$7qV%Jjcf5op?o7_djBayqdv_xTIMw96couogAGYb^`J|!R6^?kt zk2kN?1slg3Sv0MRt65jMRnt07-|zSUzzXUSTcUTU`;8wtym3V>YRa!1;$Nv(KE+ox zKO{dd<-J0~2xy>JP{Z7xwzP0Crx4+wCK!yanYNb&<{wd)ps| zI?dlHS8%Cu8v~YC-v$&p0tMX#OTbruKEmmFgQL%;dokomfYsX3f`@3OS_>x9`@CC~ z>fd+wZMVrzbyI9z%6J}Qyb;d@T zRx%L@xqD^My_QDAm)@Z8as)Ec5MSl9QSM5&wFREjbqJdJ(qrL?Vall$&AMM`P ztROepDEH)9dK8Q;1?n%MgtBDmUXjJncF|#e74~kuu+atDq^O2r7&dv4k%3|5%nxue z;z<>0j)*LH1CksF*FCTw06~B4f_&6b1pA0B?=s8?L-OLQ4-1T~XF+U<4<^JLy(MG~ zfrStHLc1^vXk+eop&w$~vi6j`{``ry``)v<3o!nd9caP3Vtf%`#jR3p{<6t+xCyAN z)G`zJobN+z=(h;(D%!>V#?kSFmj-|Xh+2S{T+bf?=A77078sKdC$Is(RdkDl`Y>PH9%{5g5PFK)3C4I3siv`{ z7FFE?Jjp)?A`0eC0?aJ}eo^*cuyo9sNqMyGl3X&A^Pz~I_H5MU9;bB0^FJE~8M@87 z^)U-5nz4wMps8cSain)fQ{q^u>%-pM>YRKII0fq*#IgJK3o5c%laa8b`4e_+BC=?1 z_DH%GVynvEpYf5&X)K!pkHA(t+#RF7cZ#BJqJFeIYGD1ymoN<13$>Llx`RSZHN zYz{;fFy=;wrg2VLb#jO6jhQ}yo>McVvsK<~K&!!B7K{st)tUoF zU!L{#29r^qH_%3=*n@~jKo;$@J9BS4BwQBtZ8vvgZ%!zfw_jl1zNzRN&7`3nu;$b= zfi5Me-hF3H=V6Dk8ORBh2jO8uG$L3{c=P&(c6wFW6!uVoEPydpp}z^K?+@Y++8?2) zfeaIE{w;K zgt9NcV4#^dWQ6kR*D1-@x@qOQIBCpwqv592i}0yJ!j*A-d7h7OaeeYAmubMI34-&#S1mglm<8VrP+4&JF6+pS`2 zUm@I|qPTA78y|SU!NHnsBCE7L75__8X}en%J|;hnm)g)x%7*XfqX>sqgA<#izCgn1 zT31jdYpIVP58vk~?B}#_$7YC?(@pa(r5^8VvG+)kqtfWM5&79WzvhJUF9&k)_1wL+ zM*?3bhUO1TUyz~d7EC((Wi{Pfd2VCFP>C2=DOSe1BybJH@>A}Jiz|c-Ce%g-zpuK6 zmpC}^d(*@x)TIr=NJ;K-CzR3Z^I;wg*mj~S&Rti-c8TzLIvgB7%e 
zlU`J066AJ+2AAvgHXIU%LAI+UdG;fY%W;_sA~ngva6s}P+u3BfEo^AVURmT27W3vz zCs}iLLhBFhWGb^7z$*Pk;9Sw#*@Zxa&$oPd{-iN1 zIm?!gLB_|BIwwhHQOm{IVBC*jz>F;%9ng(9QJ4>c ziRZJzmwrqz{Y$AK>P!anP)PU3DPF@_dPJH}Xm%;AydyAdx4(v(4Vzox_y{ke$Ecsa zZgd@QFTI6(1xj3DeEjt3IakWvfV)d7gk7}REnCV(-)d^xivv%@ck6>~`V#H2$YxyZ zsrwn(ZZHC1{`JvDH7!mp$Ry{)|8;P%mHXDXQ7Ygh5$(DO+qKZyseQ1hdA&+g&8*8a zWe*AHBy~AI@|-9pgz~N$pg}M_=QH{s=)wOwxF?^4?}iF-4fYG>#+(hSl_WeJKJqkB zZ%ak64!1=%Q}r-R50f!aEYt@Efv{%@Ohm64cHd4Oz@h4$H)8d;uuBFeSn*gE7Y#eFl_WP zGg7zqu)Ll)YB5tM~kM1RE|8aTm?wzng`A7F^XM2 z{&agYzz!&}TE?(`rBJc@#|;85-Z-msK{%d`c0zr;@;OmDh#_l$PJK+K!4`z(!{{WW z3tMez{&1ECa{xCUab>EGJ1fvY=Bv-=z}FIjWUTM)c8dl7`k2O7D?!{78@kxHWbAU$ z0F{*$^MNXl_)*29_w&n$L;^JY{FOqx$rP5rp434;h{g!N$(ia;pOPft0C6%Htf5q0 zhW87(rmZWP1fvI@-SHmYENKOFyXcD`S}9xC!Z#u~e;-Q4Ss4`E?iCleBIR2zV1s?C zQVB}5SU?OJlvvBPRe!JW`#_i!Ag2 zn6yOrw-HvT#W&@iH5@I%t7Ew^B&1lD`TUlESAux-y{&PPKDN%49w8b1WOq(K8 zL4Bpv)N-seo2|?thj^7&E5<30hUo}bbz)Y5F7 zvkD{1ST~q8@Colb)2Ts8atjLG43J&}$!jvWik3DgujMZCTVfEA!vg#1LuSo;MP0x6@>a}A;PB#?H( z2m|pWGGhVwO({=We176XbhTwbzXvSI!i_3??s!=>6Jj)dormh{P>j(te^63~-JsAqOnN25SPF_Ucj# z8qV)Q2LW

Hp8er_j8RelgEr3QUK}VU%V4#5H;( zHony8Txj~Xo^bTwI84nq6+rT(v5Y*S`Y5C9`boIwq7r4hxc>Yu2FbGIS z_1seU@IWtrnayS@Fki-6WbOy&WDyfwcfWFP?w7(wGRKuH3FN_597HP&7iud3DklLu z)wl;dd?yqqNdboa+@wBimTn{Kx%=h0@B~n57)&dh`}Re6JHyu> zH|bYyc4H*!dIg-QDj(kj>$bq~M3D%nfjn;$%h z0yGrJv4mh6|EA)cG;VNf^OW3?05PKIZsov287U?|<(A?fYhA*Di>YC0z_hzLSltb} zJVc)m+6txL+@BB8@=jCgOWz?jaq8B=r$JlUV-tgIhdB4IL{h#&;U8S&fL2Y`UBR}r zCQYXiTb~RjB%%|>#0O;2H#Z{RA_@VRg*T71kw8|lHY=I^rQhG$OIc@d9z5Ey3d1m# zHd75^OfX&t+#&93FkckZ{#8}V*Jp&gpi`)Jlvqq0f6^1dS z`g-)BZ&0l6U`Q?INJJmhhhHqIQK6^n3lE-uMDrw8&LdRN+zdsYLOw@CM&LHr79t2v z+)|ivVL$};*66q~MDnIn`R8)TO_~@2^xkr#O_f@h2B`GQhxIxY0AOL*3%=huLnFwx zo;Obd1NHYOE;$iI7V&h)NL(&RK3$RHL87|MgrmxXp$+DLs9HmYkyqCW4**MfV6+!Kq*L5bV@Wn(zJ~Ijc2Q7LTj$o9&WyMBv3u!CM}gCeQJ~O-P4GH?+JX z`a=fz-ZT0XXhK65f>+&kG!>HNJ-KrKKBfuCCe5I0oE)$elM2vPwvKtPc20r3U%$V< z4ny;ISaRvx1nxYv264&bn}2Of@uerIu>Wrb*#gaQWWbZ9|tPf3~JRvfij^%dYjue!2WgRPt_VIRwTz3Z`7T8v!xV6`5aRQ)ni zd;oyE00)P^m5ru0_k+-2QQv2S5*)r?Dz?1{eaHFbQsM&~A=CCF=VqO!M!&^f)cyLE zO4*RQub`ejc=@4-ZW6!`@tCJH9j*v+`&Wa3y9X|CIG_s{6=T7RXd+tk+<)G?B=kF3TD z)#`e6(D(zC9g`%$0b%JbN?V|d$tg0OiRhBX?zY)lMcM;A+JKKW(%!k=rl6Y=%Lkrv zuR6Wis-^V}<`GGP1$+a{KA7`=b_)d@7%_B{GcMW9bl0su{i4?Pb43RWCxs71jVPsd zh)FZNlQj=im5yX@MK;M1gMy(bbQ3TD(DdFaV8bI0Eig<1DhAFm40Az<)82I3Gyc}) znaYKUP>>v9{91m@P{QgEp+-As1`@upFv*06w#NQ$#?{HipuSB?aiCRWmU9_3q`5-> zv_jJ3Xy$ZFg@5gyGUJp+pJiTA*m|-93aA#JR zy4pxT@k}{9*lvsAlt8EDV{RNKv30#fnYT*>4VgEGEt=AS`b z9dMHu(Nk=WS;Sc^6ooV;71d%6?G?pIo%!a=u~>}u)Y>B4OT23jG<$e57DOr;e|&zC zv3y`;Y0By0`t$doT-5kdWwRv3*CqcK9RT9)uj#oF_NU*M(6|_~^rt@?#QSvG`Y?Y! zsQ=Db%rd>|$Euk}?i6U1=xIq!V}ZJ|I@2X=^qTnH8o0F5*rq>^?`?a<=MgYvE9cs zOADJ*%*`w+Qz`TF=skZ|%~Pg?`A z<;AIf?xVE*{yXj?VmHI)PU_6ZOF7EV7W-*RABnPAae{@-7S`QQena?`TAydIQ$HUL zjeCU*YSp;>Anqr7FS@#cUsJ}e@gbkW%!I=^{cjH@@_pX|rq$g=J8aEQ2Y5J5x}7$D zn~Z?Xz)Qn$xZS51hi_uzTiW#*b2Z-w7!k2rBA;%uVCRz-D9?iDp26R4-PoG;I#>zw zW3~{E=9PJwKv)cSppzMt&HX+(9Q(;996@qyu^&vTej#+?t>%N&rr9XgV|M~8Kt!JTQstM8RPnx!OJOEvCZ;=)3HDR+I+Q~2(cJv^^4KF7(MBbAlc)wEE z6-=f{z0nx@dG!OVip5eAOz9p+s*Cgp?7N6J`Dr5YB?{c=`h?MHyC8^XW7&L+y;>X@ zu}Wi}b+_)@;d@XHP8T|BcW&gqsZCqob zg_Ym%{`QnKB>2m?s;;{>@}NW%9Us_|{HRSi9UEtwnT@*IKKbwWnI>e_$IN|mHSL&M z$;4Cv0>l(GbQ6yiuZDd>X?$aw;v_p`u`zsx#IANI3 z3H-8+JG4B|`Z4n8+XPaNG#3R;Mz#Nb>9U4YfvC0aL1RtqTDJ{iEId@}UcIkXL;bcV z03(SoRW@C77%JE}k^aXf=CeATcZs>5Q0doBV`GArE+o8ffdjguO#5ubwHTFAwET!Y z`iamIP@R0csB{2|?k0{wA z{ygv7+`$_I_?#F(6i)HEC)OSq+DAyRn;E(sFrtdJeYV`xW;1@f_YX1=p;JZl6TF@l zpd4mhsEx`C-g{$~Qpb11dM#bV@93V+H$XLYQi@qnac=aeEhhy|lvPup_6FcfQJ!k# zU2`kiHKC0EXb%8rUH^P~`EOd`ui@ojG=o6EavSt5ex(Tym@;UH!400IdHEPwONZ@r-kw42&DgvVKG)_Hw&0 zF^YXM7C8FfDNP^mK`Wo6VS21;Ar)u%{1=3yUW+Vsgy{4mKi4rz-hmyGx^SZ&t)o1C za)6_gzZhz#!i=m7Ej$Q!k?bPq=Hp@QU<@twJ)x=*+pMhK>)@C?Ya<2^r7hb@x>FnO@MmM719|LA%D z9U$B97XWI2&gp9)IOBhi0=rM*ZwSCT$oThH1z-i3h*l-hFs``(vtu8~WdG-WxJ3p= zMVOneSLSM^FN`{Dev!18#(k@}w9~#$-%=?Z7lLzrp2K^4K4U%sXa%kY&MIYruMo(W zqN+coiz_Ol6V~qlPM^@H)nvmB-}@FEJApGY6qa3cg{evhrnbDeggIJ`fa1ISCLoM& z*t=MzBmWObpp{q4&wIO;30R?dWmcz+L^rFxY#0(km_H{5zNXtf-JQ8vNoyh-j4otjRVYB>bkpM;N$#>UA}xVF*U6$FVFX< zuXfaxk{SpEL(|aElx?Nk^swaRiu#RDPpiC0>g2Y}m8wKXMSbjfzAGuAQTdOp;8|MF zY6yT%8<7u_9CfFXU*{#QTMSypX%^(%YDkK=yro7@I%_mkb3@o_NBo^H8km0#)z;zm z#P{di9(o4NHCvuSc|XNJ>7z1!)W-Xejs+F<%THz;tD)HvLudsXcB6b3_kgSyuv0C~K`Z->Shq`o~8*MmfolT``6gE$d|`9rxR5O;1CRk)ou z1pmAnWT-ThL%~cIoks{9QL0SMShoc5K|`TeJ98e{>#LIfrX~|+Cglx@A|HzGweO>r zE5bM4!Lmy-i>X@VVx>b_#THf~@a}RkMM47F2YyhZzL9I~{$Ti*2qK5q<}1lu7$ojQ z2=YyeA~<@Z{;c-Kh8ML{iA_V!mkz^o4NP8@`W840yrd3&6xa2YCuQh2edV-HSRMt` zo~jJ)6&`80$SWBX1E=S1G4mmpNdGeX`07BfW z2Pjk#BZu&=Y(sx;Und&~u^24YA|1B!BZL~>4&eTU`ufhQ&!r$*6=5$<$#&?w>ztds 
z`27zp)WaUQH(^C3Ek?)mUM%F@D~6<~L!b*s1kk?x$6eC;34qhnwTe6tY$w7C1Dezz zaX`Q~Q^((F)TH_JHTrKLoXTFxr{77MoY@V0C!8s5IpO5tI6Pw+0=-L&uhwep^#Lx3 zei5wwNU8~>mpEgYg1cRRDhR;sOpCv4nMO~fpq$l&0Rs+#ZQsd272mhIvZNze@d4Md z0#Vp~rACtL=`pRAKjR#DcBJ!0U%3!%`+ooIWJApKoiWE6L=u-uHol~Vjl*GuH(N=A z+h_priz9!G-c7CwzF39q&Ro zrPAM#kI$9bsBWf|`JXYYG*RQeRit&LWyrhubcPWCxEmRPbbwKU5pQ%6eVGtk}A#tC0I6#yoOkxlVoY0Hvo?N0|NM^Id>FM=8`SEBGRZ8m{vGczu~}e^*F4 z$MAX`E)XR(wqPbRThjIfATP-h+-kz0l2!3r%RkD|P}F|A#JrSeTw}f0Z8|^$WrP^g zl)wo3gaJNOstPz8G<8ZE%ukShtglv$MON`MhmOA!vPz{0P4 z&M5I47!q}yDE}(&-L4ezY0^YrSiq&xU%1Yp_ zPAbTDxL6I)kG#>LR$ZV#jpuDG+j$}j>Wwrzp6j~vL=XUpP?>!J(-WHWuNLYh`cq~6 z^#^0=cddbbj%BSwvWLdYwf7r7{&lf*=O!pQu*YJnadS#SChhyMwV1=R%~q**ZjOPx zC~yp7Ha}{Hz1Rsrfb!JO7A#Mwd2s(`j1p*&>XEs;HianOOz|jg0t?dl4rdj34usiw zryZYDQ%Tza)vP>**OO34JOVlEDFD#qw$vsVyj(BuPIQ?OA$K31+qVM_L{hqNl?0A;_!JA7QE3!1k}`< zT_giFu~3U&_~>QG!CZ?BP-+dANN<0h-fKJ0xwVVGkShZi3w=Z-Gy-@Q_uh#m=FZC{ zT`B}6EC`m=1m1u&B&z~}@BoN3ZNIWWO#!#{$(U&hHRM)7&wa=0y zGN1XfJqPo13}y#NgtNw|5_qV>Axl`LKq6@a_wf=*7@1lu-uyJy#;nN`09SOTGCp)3 ze1nTJ;``6d?z(MHIcK3V7%8FukFU3ks$+@TgmDQD!65_>5Zv7@xLbhW?(PyaxVyW% zTX1)GcXx+r?!DhP@0zt{ew}qrcb~SZUAuNY!fRopMZjXrR`BOEcSI&gf-e-(d(77k zrzO0xa~gTA$Q4*8GN1giS$DU^Q@(*>U9C=OjM~ulNB2yAA!VZz zm`HT43b&B{H^HfXxks$36Vhu6CH08 zGgYJ@>d*DGE%^t$T;1>?U3KLa*kAt=Hp2AjbGgSMCV|mVRd=H3qTNoXgXnH(zRQ`v zx-9zCUtx#5;at)3$?`Y=!l}0JQRc|`i&-AlJb`vt5h2YV`hr;QcA=-qcVgza6|Gi} zz=9qGw66~#8IkaTsS#-f$=7derR(oKx%%zkO!ssEjwcI!PB$er^LoPQ>p2}dAwY)) zI%8vh@!uO+p`8w_C4=7GZ~sPU724M^_|O0XpX2LqMl3)1i*a z+wy0$>l*R%&`FxxqR16#Y!-dBDOx=lSZ16BnYSKQatm7pQbS_LlC97DR_wMd*&W}J z+}72g(nj-EDaZ{#bG@|KKAre-Nlo+H!1;8N9}&(aktgIiTn`Xm5ry91t10?4NID80 z2=UPMr5lvGaR!VC7C@U2r_Jh&0|K#dGz|fI+*87ul+J$;hRm!2AiP=XGw4@&oAzz* z7C8Q-nCZ;$r-k6cmU+|hmB19?t5cn^dIm9>FZrP4R|5;>-LVjTBP?Kn`tzuO9$*mv zV)xLOQ$RWMckKcOW^m>p&OXauzCLC8c;%(ja?c?&ch#BUD69HOV8u_Q3V`I%71Hz6 zTVjIOK9u59_^mlAdk*3WLXBL=%y6{_$l94DfxFYMTbh}|F?Pv__lzj(0tT+b$Y0s= zVfe*5zP|M&zdHT8+jrO^1#yLeW*tE^Pq0{x(&p?W=XdJrsw#2n`JD4%&UwgbTd(Dc zSR#bUh(EFTVkfnc3QtZ`6iKmx7C&pUu}+kZ2&_vZDxkAe{P0C%fPe0k&uJ<}_fbQ3 zQq6*Ad;MeK2rBuqcuxDP0gwdZ>uhF+4)C#GP<-<@RBc=;X8(!I5B<|6f_rF95S@^9 z9GT6Eb9_|*q_!_l3R$lU<8Wm$-_;mBl>1FBz*^pi0&HAWNgB>2C4QwdI_FnHSEj#l z^ZEt9AbKBGN3bGqjc0BN6kC}6fg+nq5-^v%j#rY}E4)-kb_}D7@{i(jt)>B4`hp*Q zfs3Rt?8O#{41$1i6W{^av*svWRS~t?b$e;BuDJ!mhlIT0S3`kE3zIF($|?AQ2yUDC z4as}(M35*FQzY|_*=mCsgFj-^dqOW0h820}$t+%RL9q_)_!q7YRyV~bY0BDiN7xBC zG#UP&9@-Kd=aOh`kzT{1h7c8fQ-mq&WLU^w#R*qmY%W3oVgEk!oi;8+he}7K$jPob^Ne=XCa*~$a@e983P}wG#q4if{wX*wg8od&U+|b zyiXAl2=Fw`GU+f=ng;-`bPk#EAb7jV!e11>zhDeXh!v)^?ojlXsG`=M#623PANMF| zkK_aPkTSPBT4{nTpu?^$pIJi3jzuyuYC;1&&67BTIFEr_PVNKhdd#+fTghQgQ3}Qg z4#Hv1nVJD42|&0xO}-x@ABCI{RGj3=Z1D;v-r-k}6mLmjJAlMxQw%3yi5wPzbp;DY zNCI#N%G9dw_!ER|9JO7i!Ah*Kn~X=KpD2tGTW?XINYas}HQY_aKpjhv@8o(IcibNF zCjhOus}8N{)3&|`GQtxIQ=I(2+6p>-&{bFvn0yu|$0f2s%s?pn(d^V;`g95kPHFj!N}$)4vZh-l{!@31yvIk zY!ZT2gw{~VdaIrUA?ST3&lYGP;ZU)|?y_I%5j*I7=b_97$wmNG=3uFw3rC8hLuSCC zF^l}??|gftV_uSPmuoTUzI=1svsuXX!`&a%GE|tRs8uq(lVA=I96CU zaiuEjJ|e&IOzm5W@HI?5K1M0{EXj&BV8&iII7d_XdU|TW*G&=!sD?QO_&ilP0AjOj z)Iix!!Y)F|f#1kW6=L=Ih#^z0R2zEY6ri=&%m9mwwNEgaY6V*0On&x%)Itc66?rPV znkgiHCHeR6e~!Ay_d{Pa4Q;4epIwsah)WUDO%EZmbG?GnkPnr* zPpw)xF6PWxaRlFjs3Ay%T@j7zxFukjYXqu6w>=^GmPa(HNvJ@{`*^bHw*;1HR6eGb zQa67N6mY+NC2iikAQm8R$qgzHQx`8|W%;4({x;_E&)g6YM>)RVsaRiD*=gGMoLC+A zlfDzuiBRXdUm>o&BCaF%nhwVFlXg_niEuN?;ksXg^YX2Gumk6KIdnpbxran?T@du= zC7gTj_UF}_LOXSrE{zfHB52M8Z9ZYV8nz-7J|ahDvv8ZE$)r`uG&={F^z8QM+_P5V z#HT9Nj{k2G{i;DtRaI6#vb>^VZvGN0Ktt!6Tv&MMXDuly>1MRq-roK#NlQ!16=rmG zbu~9Xznw&@-CkN)`1e|zIIMQP2YCLAAjwHdNs*1#n_zrs{WQ|14}KMQcXu~QH{N|J 
z5IN+WVGpvKb!PLKTr(irHhjP3Ojy*}(kv z)w;Z-B!>{4VUy}K8d58o$G4AGcLVCBS1m%@qi=Y-wBM{7^MjdQyFMxgoEwlD_)s4B zN0wktH0KsAauGIub?Bs<=+f3P%=5BD04wlJ^jcToMEgQ^R-t}&7!0V85 zqwc8G01~?MV&oM-YEKeI02DGk1wnI)(kI(NzX^Ww4Aq*Ef93(6cK1{Bk#JjIVm`Dm9mHXSaqr%Q|EXp^tja%OZxd%c%RKh>_Rlh>dcsANV7OsQF^ zPlcq|Qp%aiNl@m2C|{Ar4}2G1>?MFt@Dm>v$18m@tzj8sG~?}vH&d5G^AIe>hyi^q z7gkYnpHr7fmm>!3=bdmSR-r=6B0=<1u)eKZ2|&UOs*W^^>YRyiww}+jF7V@Os;oy5JLGwB@8A~L#*vSOe@eC3lSUYwaCW3J$NrA3sf>|wWnNR zCx%~H&DdqII|pqb2*q=3_@R-Y*WO7=Sv_A;DUNlwm4!qs+OFa)WZw^3Amh^yPgYt9 zjc8ZDop~zb4}{n_E=ST#)~}F{KG` zS(buVWa%#$C^F`HTgu$6mQ|42lhdn7?#4@xYPGbi_sP<@Z!bKW_nPqC4T)$s!y_#= zHZgFn0KE({DWnwVdRQ`MGVo8$-b$-DP{&6_*$I;hX?#G@d=G7W$SZHPB4n8d2dHoA zB2P9}bbEi6OLtHs5nCUOmG5#opu)6}KUbZ;8Lb+KFV#yA6K}I=Ji;F+?#bx(GDj2+@PwtrM{9aQxKNMN`OR7qZ^vLAuV$kN$5vse^HSl=B-S+WG7p zt1UfE%wf@8t?BIdA2;5gDu| zKIf8;u)e&)U0NQ2kxn|vV3}Vgf8+k~9HjrVH6Z+R#zqVs`DLkh>`G{mrrs+ecEw;Y ztpMX7Omgk|AkdC*W1@wX>`7`R9p{N)m8h@Tsg%Ds7wef2=IOOjle3oU`;(P55lY_HA>6!aO;VGM-l*Y9oF{;uWrqUF_zNwO5yn4J64Q!^`0RAr-I zsqaY+#!k8|D2PW6EbNfU<)>gPxTV}eE3&-OsA^gT5lvB5fg?#J=!CMCVt{vV5F8j8 zU0%DGdmzAMGb$o(5wn3g+9yFQXccGGxbn3$m)L-T@J=bGvbkw9JCan#Mu%3;o_;E= z#Y0%P92jDgr{jvrFp{@1mdyw1lh{=zZBzTB8vb|FA>}Lh%#x46oQJ_807wdu)>VQ| z75PgopzHr2%Oxal^K+d>lG)g$M`y`L*Skmkh=7tmEZSUeu`#EuP_@eW=XtV+XJaKs zGA4yTsaB=l!(j05%_1HAKTKsG5-={AB6BRmOxI&dGER z1R*|w%C!gZcA}~xwts%!7yOPdGFX7#%~<45VepOpbd3-K2>YiAwQFEE6-4>qPeyWp zE|D2v2(mR)VNUDfuN`s8Ke2xqSxHa35=b(m&;VfHciFM#5jS9+;@Q6p%{!fSx|F5!#^WcF zfmX&=y*n{*I+DzSvDV;x=F1O$B6A*=yK;m2Oup$BfCj*yK%zoe7ey)Q@@HNxE-AC) zAkUjEF9R)W^uo?s@bWFQtO36{T1k%@S6jg68_$?nB%TFNVkU7jTn7g0C0g^2ubH=2 zR(OZ`2Gg;_rAUZswq9bGl&V^9~&RyWE^ z)}Z z^CZ?uK^pGTTT`2?9@u|*$9py`u6{X~#1unJFZmG`qjrxfDKQpABw6XpVsu+(4{#5v zQd_aC+vCTm!{?U)6g0STCDx))mT!NMlPkEV)D~~-$O4O23Y+S|7@0(|hE)|sLz7ME zOoOS;NgT?naFRv$YEmfG-248n0;oA4$%9tE2er6^1^cTS`B)%S5-*ADXQE7v9r^c4 zBkr7smzv2n!}N+wk;^hk zf63J6&=dz61E#D}iLIW0S3dIw<9PE346u`klW{>S$u1oBPS3IYZ9BqS5Q(-RGEs!E}t{6 z2uJ~1qCc*Lvg!~cD+{fNQBQa>;c&sFc?aAgII#K6(yaGwMw;93CyG2-cq; z#MnWQalhB>Cq{*Xn!q$iLSEv}qAPn%k&4^$v*Ugj^T=q04J;NRk7flU5QMIZBl+@X zRLr`vWM@bfy90Rph^n6nrAcR1j?K@>n}SeDN$gl&#!Bck&5oB;W2(Lrk4MfSJlayw)4%q6}D?mLCqsDQiN)}xUWt^1^+%Jz$A#e zdd7OjTq0;7_qz3a$`raFKHh{JZtVOTX?So+E2BgeD`$hUvRRy5oMt%4ZsbG~Z!pJ6 zi7p|}tQU%ikd_932EAWn3F?J;TrVQivg(XI`J`K+PE~=YStg80r9pI;(WJUEai)Ox zS*`+Xau=%=zO!q#SQRLBLRAQa4d+zp*1?j+S)9-(fAwu&_O63#o)1-i2;>$N(^;1n zs2VG#oNFb+=#rnNJeog`_oRJ1frNxHyB9#1rN>g8u+!)CvKTq8CpP9WdaODa=xD{` z5ZzPHdIdT=r8rvIWapdtJC?FQV(OS-sjh$Z!fjsQ&=oEZoZ?~|->&aE9sc4b)Rm!d zd$Uz=|Hl-pqxBYM!f>{VOzDzOyn8F{V)nry@hEDxSX4!C9KM!vCB|%G0^!LFDEp`A zwKfSW6;849uC{16WCQ>KFIpnqN-m?+VQ~1yqxNdkVb6i$PZAupMs=WAc2zpP^!!OeWHQ-A95~U1u#Xsa%8%A!39Y;mkJyV4x zSPC#Y=F$)Mq_Fu!Y@F~HFeZDKl)qiKICQ9}RPg@>fP+H?w6zb(`HmCaJA&13)A*m* z&qQc{Z%kmd!=S>d4VcjygxD&{IRD5mB`cMvDA)Rp=n=Hk0#)`-2lBJGlmkjyI)64% z6dOHIS0-9fIHfhBACcel>j@LEc7u_DwmXHP!IC>jIzH{yy4uR3yxRj6GjwhW{!c#8 z@CCG4LU&K9tC+29nkv8y23Lg??p&3mN3yZq}h2AtBb*$&QUg+))qtWM2fI%NDeMV8g~UswNn;1{u#caifmt`-GsVCBb^f2Hku6;| z=bQDed<4=KJlo{$^6VwK)$FhnbHx{<(N|V&l-sB#f}bxo=WLCUFLO$&U$4iDM~VKOhhVz%ck{%rBW| zTWKaw^iW6csgI(8L2BcXZnEC|2dfWQM#n4PZ)R_n5-ElXDep5i_fBMZ?kWMGNO>3; zK`I+>qErgGs}B9DUN(!smoi84S6Q&D8rBTI9@9&Hw3-x+Wd+ATr_Af`UEHIn2z0{7 z{Qs5j7$S3joZ&rL_l}|{u-bU=fFg5WhT%CDa6V0zmKt?Ky9D!=oA8e_ zS2{o(w3EQ>Q@n@HQuK0{iaTFLMGnfYziEYJ{lf_O(D3{>7v@FFY{>=WodAbM*d%TM zA^l-d`62h$L6do1^3|6NFb?_O+3{<#x3p*QE3LC5yDx-}9MT7V=%c*Cw6|0Ndyx8x z$$g>&uR8!F=l@6k|C3^KKr#KLX^8gEu8Q!f{vmf263|K+t3CrUGU^go2C2P?W;#GA z2!O3G&}m~Xp%gX_!QNw#Y?sv`r!30B{5eA#0S!a|C7<(0)}FQy%b!Q!-+UIFPj8%` 
zQ)Z>{cqJfJK{7#Q%mmym;U+p9`?S?rYR#>d=GFKm2T22IeWCW*{c5=40)68+&Z+PG zVKwpP$3wE(QZ3E4>cMQ+M#%WQQQE+Fi-3_vQRa40OUo6HFGroq6n;u$mYj~{WF1Ex zO69GenqImXubtIxpvP?i`sNZfNPS6ba0)22ZKISW)h5TTlwDC717!(`U$$9rNJ>2ihF0V`Hb3yWk+T`1f_*V z@n}ucYjwlYk8i@r)4Nq#$DMc6b~w!iZeE@IBz-^NT_E;~D0f|NbKYx@s3shv6V~Zy zFIa9jVDV#!p&_pRUFpBny{?#J)e<}yw%C_kulOg3zoThHT|Ie;frMx_7;Y;~0=HJH zz19q6{HrQ@d#eTp);NOE0lVOcPEYU0Q+(>GtKMq;`ghzha+8y-VqV$_*H(qS8lqR5 zXbh6ih+wLO8%K3zT}Dm0$(#v(MRCw6yOX|%NFk69@B_>yCt;WicnKI$G z22?5a8g!g6>Ov?|?q|IF1HqHu_rc>wsOj!K9FIGCOV`bM6{gY{UR>9A!k_lKC8^p3Tb@rqMG8Ax}$RbYMh1+(vM z=X-PN{gJ84U4~WWpiXa$iCXaED3ja*{TU{yV0|XTRa2DuEfJyC?ivcn?58b4^JWRNBXx)4Yi?j3pnSH*BRqDXNJdkk zI>df7DmdnpqN``9O>DAuywO>92qsJ*FqTwBc`kIER3_-SMlz15-UOn~C8TlULoFf% ztoPTi3}-qHK*#=TId70&&xkR$fR{Qu$!rWiK{+ImV=KRwS!Bk76=5ggu}qX>uwl0u z*WR|xmhss*yz&!2@?ilwvx0g>rwo#HeUOJL0(qz(dB#R6XhCExrUM8=Xj~qaI6X|f zZcxu56dUK;5qQ8%!)>i@B%9n#l>7;C9ad!p@}F6~6RH=W8ZM+;`u+7&2CzBv^E3Rc zz^*g>$&Gj28_G71qQ+J5)Hh{#v8CdL25ABMLWD7*iY%6Ici7f46mxs^zk0vb$u;bX zp~MaUihOu^WRy1LV~l({p05BgJ5R~*>+LCcCRcBOH0G!7Hz9QEtWZuX*TBI!Y&DM? z?#f$q7@*#&_Q%A)r@dH5xqB%w>U%rX=ngabKABuQwYaJ}|PEbX4mJ`^r|3ntKF_93Z6APnhpzj;R z%v+}K?PkdP8f>1J7Q#yLem7anMXAwpWs;w^^RzHb(L<2BJ<18t6~A( zwLne3**zvQfc*r=UXJ)KE*d--sGJP+OvM_-Pis3_zhWvM?nreNw(QnAVh7!o+MA-> zKv2n?3W`ev&a?7iTP3r~d5(AS{ReHX#e2bFqTwRwn7|XD_Vx%+DCgfMy{vHd1&qGM zpLffKy$!QDkc{UIjyc=K7XV^E6J#pjtMzZ_m9dEw!Ld8t2eG##{o(l-7ddDz=P8cG zH$Va`jseEdKoMf34hZM~L|^C0?NFWSv3LIG&~hx+|JWQ3YH`A3BfMf6ae%i+MW!;| zSHq_#UMLCgRqgK5rtjf}boeCv|WIa3CrttLtbXI-I*xT^Y4NnaeS8|zML zxQi^LcFj<7!r2Hs+wHICCed|e@sL)9dYk*lU7MDvKbJ~4%4BC686K2}!lxhZ;mZ~P zMjAd8%2-OGz-6;#Kj2ii2t(*xtL>TPqvhxvgG_vd(bPE`C)%_EJ(XQ3;a~Th6d$Vj~x7|Q^*7b*Z8=d)5! z8(b}>P`hPpt@bhp)QwDWp&L37D`Ec2J;3eH|H++lfmNRW@WHuJeLn$cE%W~v#%MA@ zMGgUPGu?BQT&@aYAVp#CMZjEVehuJISfl^2D-+^6%J2HZglgNIoAB}a(91OW-cRnb zo;>XIz;EFIzc1ZvM|p)0acWiohx_{W`Wyg-I>8%eU}*T?&%jGl(t)sp>pw|~FS z%qW`v0rZWZ4FL$Q_;b^DV&dP)M1b;f`yyZ)=K1=BfWs67*oL)RXm@zxvsq{6)U09y^LL@*|#8Wb0#Lc0) zgQO((ObT2&^d6d5>1_@~;i<;co$nXJU{x;y5etGw$og;6JR zU!&*tL0@4^MpI0c6U>!U7DRI$M~IsvO}r7k{wO9iCV7y2z;9DISepw)abZeI@U^82 zsIkTfF6(BXnw-O^mq z<)9tFf--oTw*J>U-KFQRc~Ovysu#Kh_yd}fkyXD0}tWd`!9N$+jw z8w?f90mc!7Zk@r$qx3G<_m&a8u6a~}^!ofb8$2efLht?ds0%U*fhI+A1a&YWnycVN zh=T5su0#BChTP%yxoG~3yX_1Lo6eqOie)(dnU{oTv4=GUhh>0|9LzQ-Hu{J4YSSG) zAErN+`ycv;&m}5Xv-wtH=TanhQ`Dbky)>bP(jdF1`*a**h1S5-*!{5-)bj^HhVu;?JNx?ncZhjhY{ zZTX;qm#?z=Qz>bm3b?&&1JN{4YTi}OZxJxTS3O)tm+-T=rSv-8os1y%GAN^UOtdu$OH&S=Q;?3`)vGf#(eKe4c7hKb3u+EM!pj{SZ;b@&7h&(mA< z)2?9Rj!X7jlj(0+@@~;R| z)1`Ist?q*^$lV(x_PtUoAt@@^x}6YbM0rE=7H_y`bLwDlT$}OXR&718OYw2HXdE4) zLm#*QnHjmB^}@{4#L71e3uk)2hxl8hvMBq_q}h^#G}Yky+=U71pph~!~LIc1)~zDN=rk5FVO zRWL0vsKr9fpA08M`B@1bPviJJzb^gKkGR15#SYqnH#jgve~8AbvNea-YQO%*+1(f# z?qBb)(r$9uZp62@ai-DElI@#58)Hfkr%yL+5to4BXz2c0VVZyvHsJ^TACa~l*ixjHHMVF z=WSK;xV~`1_dYL2?4)q%vn|dUjUAM`+8T%S&E~C!1>VphD>NO_;2f{;7bXDub-ish zyLZ$Rt}zojg)rd8WS-rHxS8Fbf*w@osTchUy!J=;&#`D=&({h%`X{f=tRkhkhIkqV z{2uLywU9l-#D|`iq~$dB^K+fd-_P^66Z%}uU$-N81&%zoUo@dt#}_{-4+jYa3(+3w zebc|8`SH#E%lOh^o4uc#DJ;(CN3kB-31^g9IpOMkMaFcSXhA*lYJ7W=^*(IMdDc4o z38nqDdJX5<3sAjWgyG_jf&iQ&l>YdX(pgag<>O4kwA5St&!m!K z?q2;iFO4w!F(4rCH{wG4iqpHyG}N0Z1$pix{KpP>>whja>kL4db;upiqL)O?BBww( zPoo#YhvYSPp};1fD!>C$*F#gFZbN8>?tM9^f*qAHl(-Kwo!z63{LzPu5{whboEWj8 zaA~TA6e?nICwQ-dy7yUzw}+l1Mc18sK%vXJ^nS=|+#A#GWa{4VpcGWZ{Q10;?da-b zXj8gI?9@wTIVH++LHlJgpdn(Z=$$Z^nyz=`ia%=&gWEprmY^etZjFG~e)MN_nEL$mQM z#rhSgYR2xUSoBez_cX%~>O7}VwDz1>mekpFHQG8YTei0MLLu}`qd347khlrx5N0V3 z3n0k3T^F8w&YnFHN*vSU?XFhsu4X&M)Y;I>(+a?ZgJT?Q4ohmp4`CiktIma5k{aPjsp$RtGylkz*H&2 
z3nakDRlBTIi+Yb5vHOp6&@(~shz6b{-K2gtDOht!<`f8!J^)Pw^D_`&%fP$o1< zCNU;(KB}?WIr7V=U3w{@EAmtvtch#tTdKy0D>!Bnleq|R^|xF`m8vkpCOtT8KE_Gk zoFZ@>^(yjZikDi2ha|FWf0P0bOFNb7x8_0hX6~3{`{=3P%ICXkFhiAQi^Z;&$v!cVbX!7+OESe1R6C(JFGET;xAuVGEWO2_5q)X z#@WD;z~`^*U?`yP0ULwKaDd}Kc1kw>KX>{#;$zDZa;VQghktzeIEDj4@84to*$n&} zVkg#rr~3DoLP0vH=0{;QwWQ$S;3OaczS@U@gNuoZa=Aa#+_m+{sjXGyE6l-9bTGT1CkZQ2_ygI?ce_n==RQ z0!g|~Aq2f6jZW<1xM6K!CG|NjLw3HsAC)0RcGiR?0fKfL)<-i%#U!Qbc4NA`>DpcO z>XIG_em|LaAXJo^61ATc(GaQQv+a@~nrxgE@kheGb| z?)zhDQCXDv2by$P9h`>jyNXg@arn64dRUdkAk&Zk^_v?DnSdtZOjK!)3E2l%oZ5LX>1| z*ok$phclZUX2#tX%y{C13$KWnQrye|!JS?eY_>q`!(d@<9vP_c&~R5a6kGAToNu>l zKhsRiE|*fNq*Y(oTAai*W>zWcA#knSL6iH-Nn9JohAyvE@Ie(?ETY(K12B~TTIpsDUe@DM9*)iRC~_CzRR zpeWgw{LLcZa`qOg(?C&@cvEgU zc(TL4(SV#=$?oIfWOll3Xf6ldCH6-z`k%L@e4Js*Xjmfki*_uHQDsm&A>c)uID{s5 zNe~=GX_GIf)YPq`S7gk!JUJ5rajQ7Y)GVm*j_+Hlp(jG^i{ndSu0Ykr|RrA?vW?)ninp>7sE1^~Rn~;doNN59;rckU|Wt}F2=Zx44cV(N_$jS5UZW*d_%o@rfxpn8q zC}+cJmepS_`fnn(En|NgyH_1ZGgPsDY-%IWh=jK1RYDuMePP^qZG*TlY%ML!74gW~ zL0e4iT^jIiz>s4F*Sv|^avl*B2{i>wQw##r<1<{eY>u#V)`j~&Q87q(&hTM+k?^pp zt*Mn#vw9JO?$X~kww%q9V+6?GsQeB=$+4pN^Sa{;dTvGPeeZf1fpege5p+%+j|sk} zE)>ErQRQNw)1dvRNwZ+PnRUJ^TJ?yop&fZ}u9Ub$_3SV(s!MXvJMet|n?YM~Q~_zT zk%Cry;dFj&pMZp$7*`tBqJ={1d4?$oXo#b%X3aZ2+uO5|`w0U%dPv=JTzJ*l~>M7+=P4JE$V~Su!LZK{S8J zmHox_+_$ivlfY@EcQKG-C-Al8iWqxUWY*C1-?pSC`qen!geS{-Nuh*xv2u?Jx#n%= zud0yZlGRj#jdf7G;UUGqGy5lF*885+@kLikQ`qO8v;~ZIpqDkHQ$y(Q+K&s*uL&uz z&R*d@P|iMelpAgH-W#_g-}3&toeGjV(}K3Dv@#AK!K}3;;&>vru7_b5WHz*s2_lGm zOAHToz4%T-rET|e!}HQ5?*Ex)`%3K#dCg{7Oa|Ic?z?MxJ8qmpbB8aMbOvpSQS&}= zu$Ls!8IM!w%B%_>mIBzW2b{P&DBN+?@LtWmoWPL;>U|;C)>?~SKZG4rqhvx~z!iCf zf>R~MDLU*hjmdn|F?>bg%+^#p7*P&J=^wWq)7A3d3ODJIDG_aBhT?_Cw|E1X3Gakw za*&$u7)4~3nVX5*V{0!R@6R3R{5C$%h@37^glz`TD9AB;hl5?V3(CJfZ{(wRwfn6Q zkr=-(j;*@kjk$%y(Y1)#pH5P39iU_VykLDr7ha|!fMLjiaMsi59?E#X_rjn8>qM{Wu!2WD?W!9t>GLX~ zrxWa69H|?P_)#cRVjbS?X7OXg3?9PG9jlPn5Er3KT#%bTAuHSnzDnKs6gA~OcO(ql zvCGj|{7vBCxR|*XJ8nVG^+?;r2`fm&Z1Ly|!_@+JRixjlXS?CJzvU~`lhJ3_f_>le z1x#~4+_mU*xN|s#FWR!8&Dnl`kH>*<)#XUeEy#iqj_UfXqz*fk;R?S|XRXz8y$Zdl zV~P{6(9lMN-+Whj8w%6%>MZeO(TlRZg{bmLwHSx z{~3|?I^^N_yklOtz$fr4<q-L@x$ zi**NIfn(00pbQXa2@|lIPKiY`j5hRs$QU^`NV~h=sOG(LD&tgX#O6j)z2HJA`~aPQ{Y|;&nh!9zcPU%a8ZU;q0cjbV}Y*qwlKV% zS9FXfJjiYUysrxI*5ffQxU9d=mZO9HCE;8U&QY12ElK+W36#x2R%U z&CgF+(wa*-ukmt)AK@Cb%;)n0pZ!t4H-9Pk3perxy*5Mp_ZaCy1Ye_iKRY(P=`A#m z7iR5TM1pYwXFI>%a$P4fQNM!!#p~s=f5kf&{JSp0u*=TN?lY_!R@?sioCWb&`&JZI zHn+`O{nkBMO`6YJRR}BmBx?*K9<`Nzy0Uz|?Q;^4ban{aJm=VHJDzg>HS_?nUa>>= zQk0&|M^p29eI2ik-F9WMbA{gt^OQ9vh+fJU><@Ayef94d-J@B24wlq9UvDy6KUleM z;f+?}ly*Aqzm_Rs@tIHeTlWZYF&JG7p-_E-wCmzcwhc-t_3fN8rJux&m$MW`if;TEaJ@%Im*KbaK%vtd-UFgtX#8)o;{4S&z@rm)h(ZBm0JX?U&vQ0ZLy~Dx)7% zJm=>UVyP&*F-D?)bJJ8vX&75vW^DSD1f{MM8y4y#%!V8L^<-hT(*uI^S1hRgb7G7f zZO~BTV(Zip72;Yy&c20;-XC#oogy2vF8h4y!%pG@r-A{B-7Wif64m|p=VJnIwigNs z0kg*3v2XV`TOIEfvt_*6GGDUNTf5TSZqKKjB8~c5Xgr{jXcmx_gvhL<-x@5Q>&W41 z&G#xeHL)~>^x^vkaKh}w*yf8h14rlAp;wF?PKWgvygWi!CvxzV-WAzlov`VN$DRRrZJ#+!7l?a1QBd4?3d3swFV32sO#3R$- zb6l|I#8gZvA*2?UTIV|-a9t_aBOlbEqy!50&>=V5yB(_X7@jw_=b*oJ$Mw%zla|{R z_8lgdxnk}&r#^AIySnh1%dBXh@-V9k7&;iEI=7H6!csukThHHvo%}dc`<^C{70*BIgx}tMd72t=>7zl zPoT6t3TsB}>L`u{b3t7cYmDN4%c{J(01x#@+*nY_?I=7+jN^Y|RB1e=p^&B5#zN*k`?9teS86>O$4I+~T#r2F_L9e!LWc`5n^PqE-IQVj2iB zqUtP&W-{E6&IV!M4dDC0T0yG!>zqK@zw5K4H^kFVI3TAMrgn;fYXma-;gQbdAi}38 zaZaO$iiW@|Xs0(%vpW*NSM?8`gs-}mb6W3$5PBxSVF-~VjcQQlnb4W~nB9B+mL1s2 z?e>85Q_w;k0Zjx^q$V3WN)AtKxcN%>JQN(1{iK?J0Htu6)n?FheY2{?1$K<23EpC4 z9=>3|HFRZBkniSa&gBSK&V7c$E)5gDoMmW!r*>XPUX?tb>S?%+D57UW9XI*)*<4@JI^I6B=3ay&xo0h8U zL4Hr$MNcAoc`Rxb;U_a|O)>%HaZ 
zpgVJ=8ZvmF%;Jyjk2ZL!6PsHPAwm=dR(c0(joR0lXl^yFB>yFaG|syNRviz?7MfL7 zElR{jZ5GaxjT!1eDx}@o(;0`+q5_{Ll3eeR2;S+u@xOKC8MT({Cdub_kuTLsqF=%_ zcUgJQPJo01(@mvg@q_UThZ3Qy0vp5 z_}7)Dz-tce+e_F6_+jN(49554g*>yNIJaLE5s}~U=>DcSZ%XrO`5cDKfBG@>vm*0N zrjf!1sorwY@w3->Yvx-od0A=u46V9b?k=)YnGzw0)xZlvyG|gj);pj5Xg~3C=gPkK z3j&7R3`=byJ;6rXQ@@B&O3?+TDFy~Y%MvS?RjQ>`&uwU z^AZ{6v{g*pbjsmm$9>6mKnSS?(yCDLW+u&LIh}PqLv2DBF{Dgj@SF7w?=R;pIW=2_ zqCsoginW<{gBkCazbu>7DDqRUmtL(ddcscXpA9pV0tgC}EKPD7jSqqavjuY*?4Y6Czi zl`|MBp-5DBP9N*Dp(lW6#-bo8Fyyk+aqlb`NR_uZPGV@zmro8zG&koaM$M3a&)G2* zhEqUeo!EG&KYr|D5PTFOc@QFf5YFoxvuC@n<~mg`kIhc@YmLrN@(+Jx>koUImk3_m z!G6(||4pjTLU!__CB7in$TmO7;cEd_>zyPI-HPjytb?X`oYLfyL&?_~-|uUv0Tldv zKh>>n{H|$vpU&Cy*jKiG(%)hPzF{4Q1-q5E8X#n_rO7BpYfDZ!4kk0wQU(;7gbWP8 ziVs#h#V!GnyU&D_43!5(TuX=E;P)ndLAlPD@s`y#kS5|sc2`Gm8|Zc@yZxydvrh=d z-EL*%g3w+6|ETvX6rkD>0WwQEQKI*(`_g;LSX1wCtOEP3WTkvSiIZ zE#6!ovO30%ME&)69OFn*gM_!su-Zu8`pcsATV5|iM_7W^vtiB4nLMpVgc|k%U^;cx zUk)FQ%L5zZW7M}C>CV9UGC_A;)Lbudsuv-nN`MW_HQ3cMS#-kpb|95;i7+kOu&gs$ z_SWU?m?l6f1RoqB(cmHxrYcj991kd=y|;>8F~-a!jcC=Iwnygn(M1^XK(!{Ax3mVU zXV7vm%h%olb{+28vy(^};4`+f=`AV0adxU~k;E$b;1S9tm6>3Eqs(;WzqjKWU9A;LvNyN+UQ&wBB=U(mTkiwQUwIBfp}= z%supRQ-*l0Om!~TBHTQpR^N}oi9@2}v~DcJq$H8UN!;u6;uq%=(-ah1G7S9+y)8vQ zOUf8XOu7ag?A)3v*9mqz<5HQS=Xff-dDy<^+YMOI%qbYhFD_?Pl~)$vXQw76FY*2B zcpV&sgpn{w$IN?sW6^V2V%kSsgvri>tZ-~nB8aAwP=$$za3QT;dGnKALT+AF)%Dv? zhQA>mPWs$QbyN5nCKp>lI-)cza2IcQ;0PBSB&p0v|BI@(42vV!x`lxWL+~KMf`s4> z!JS}1f@|>L?(T!T1PQLe-Q6`f1h>K6-MO7}?t8!Y`@xTS9=f}_t7=#6wf0)oA?s0G zGscE%ajRbOfrRF4l|m?IJxvAX6xca_Tz{w;XHW~aY=1#qOW$cB@_&6F;D~zge zt`YDl=HomT)~v#sHu9Oq=uFa-68yRT*(d;(;&-25lHQ>uBFP^b@xiNix~o> zb&c*`7#TEWUPS^Xl5wXs$EL%_m|i?OeZ_7Dl+%Ug^S= z@S6z@A8o4ceQ$U1_7bW3g;>qQ%yGae3O!*g6W6LhG8Ew*n-T~yp03nKact|uGZmLn zh=9DH_TlZhhqFvX5QzToUnx1jqVnP2r8ovz`0OG7^J}((Wx_6+w%fXcpuu+Ab~u@= ze~BF>_=>G!F#kW$^K2t(f-k$VofYu+6Z%|w<{^?^UTjT z3?Q#{RJx}<3h58LbacEWp$-~;rAPEu#=%5QA^CRfiMfBLKa7|5>5wL38LNM92no_0 z$G2g(*wLTsakCl2QQ>HfMRDx(`rN}QHm)DEzQ-M8py7uiKF(36UHfvS)X|cViBpjW z0ng?*2!7M4Ld*i4G>V@y#TRXlATnA23EwjXNpuubA#gCHbuS9SN>yCl$bd)2kTViA zpGCj)W)BhTm(|;*@0NUSi5YpQb^BXZ1tG^rGODxED}^!~HP`f17Lf$IZX2^Xh1v+fAbB#hv&E$#EqHK(fz9X(IEA$VC=q?<}ZaaQ{A6#^QTX96SS!u#7$LP z&br%_RBA`;o{M}pl+w3kbH6AFv!dBo7o03Gu2BoLB8xNIjk5mg)&Sf1s7#Iq3W&?m z@9z)~xjC3srRnjC%EZU7QC8pA>YQ}k&bsJEXdGET{1HOfiuwMs22w~Y7s zxAB=bCxAy%;rtvOu(^~BG3|KELH-9!-cBS-&n%%(a0*Q)^~>>%1<5*9+;Hq0+BUmyD76SvIkqS~)o^ zQj~^LH6K)GhGNJM)Os(d-|~v5y>(aT1QER^1uVOgeqzi^tix#Bq`g-QZN?+MM+ynO z%NUmNd_Zm|XUq#IB#5rn*>H}Dv8s^Ng1yFKkoet3X=$U8p&>k_<7I-CAE9C8em|36 zkdUf)&~7G3Iu^^@YqX7-@lgcix@UvG7a?cZq&d-vVJ}V|d3eRH1Oa>2I9Zx7joop$ z?xRx{)0(3$tm`cccczwJehyGm zA^7veMmNE2>(#3GUOS2v($wp_GFQ(9CXR_kGKvQE50x8D2dXyv9nqif@N(5K^EI(^ zUMZ*6l<09#1w;?qkamae$%F$b_^3<|B46;q?p2Q;rN#WixXO7vm6UtPA|!G!Bz;ka zHw)`=?Y0s7q{wirb1+fqHk`tHJ_7tGtpd9y*;`b6Hd(WmPYtIZOt}-Xmk6(h^_!&b zqQZo2`?*Jh=SR`k$5d-3f=@pw$fmnI1p#d>Hu1O!M=sF1qXBFD1?t_3VS{Iwuw^sH z!u5!`Ne2ewt~n-#c2y65i8~}bmHfVCROGkVWMZ3;99RE4dr$v%HRUPopvw4ny#B_! 
zc=C$uOIDPtpf%Y3>W55y$MB|cJqK*lPF|IsS?fQysI@=qzb#w(Uo`vQ_WWA&(q=kq z&oe#-gHBW5juP7FAZGWn3ib&dMkUM}BHsQ{iQJ(kX!YL;z!4=hL2+a^{{bd=ku$7u z@|g7Ik2G?Rj%R&I>OC}FE_Nx%a92# zRjtE+t5b(8=%6E-gFc+SID}K5V$B)OIHw{{5tsc3q;Kc=jr(=#@BC--$~JkDmEEp> z^5h-dG|o41sPt2Pib1Iw5wl&h)#_X}rPDsRDpd3A^$_s{cRUHFcs%AoRgX?l6$u85 zTo%c0cjcsJO_*Hh*KCp(3j(?fa~i9YlTVD-*;;)kEpQJD<~i2<-DS18%HJUOyu z#{UV)dthA4!r4MQ16+DJk89+p7c#$1-S+Fj!DQqOWc4akIGuhWwaI8aQ{SpvYPWBn z{Eg+;j%~_{?PnrEI`Ya5)Yx(GI4r0Nh>_wM94w@+XRam5(AJ}+W%HYsXRt^HEGe}&)?as8 zYea+gsvi8!r@#bG$Te0g6?Bw<_(G^{LGv8w)c}(jz>>mlgt7|H?Z8 zW}`4Jlb03Dv%1%ftlT^Xj~y`7IdV!;n76Z~%^&w3sPOg#b}oqWz}G|-(@R?36PDUs zFU~p7wkm_x>9029QBu3wynil{=}9PdO#W)i3D0|Z@@5P4+%O^3A&gGO$s|J)D+->< zDX4$=mL-Z7S|JocXib)z55thl8sa08&O16@u+J;~J_IH2@HnFWcclcn1*JA9IWX{f zs&;O4M5rJ!hI=(ONAdFr)?V`g&?N8KVxr!MTcQe-Gqj{~BYA=q$tV)6-k524n2L@4 z1a0L&SKJgm_k( zPyYB)JZ7t}FSmm0k()EtZqum$>9CY02qV&rH^HEt$8=3q&bvm!^xZCv=qKv!N76`{ zMLD?G7aN(fuO0x0G%fu$yZZx`DgOpJ6YPnoM_g6mnWtjPVejax15X*6*VGUS!muwY5S zJ*IzUv?q$ue|NUuO0oBbs?d}ylsDjxIB91x%rtk0ksg#+jpl7g>MN8x<3{1ks z0&j$OUUl9**#C7=ZW$4T3Ms8Ud-5M9$RnnAe=L?GzE{()XAT*=A#u%wpCP92SO!*8 z)sc(0c2V=OPpvP+Y&^G0cj=s@oYJ38+N9ki+wuvYw@c^n!aS6TUCV4YCr7|;Pbkk> zkp6`4+=^kss0$&$@rO5cqc4v}FU6nfqkH;tQ<^Yp_}E!ym~rM`*>YdKG){4k37|qi zQ5uV~WevOf5ZES#Me?YehBB3HJm7aXMYvV+{1;D{6cq2Uhi|~h8sddcq!AJ~4>Z;I z!9=%x5u{6)+$v1UL`AvkOkPath1E$8;Kt1U+XpJp!w6pE*>jnRM`eRiL|j!%SnGrT zJKedYg<6Y2_oa83AOoG>qE`wVJ#En~$#*E>`uNV>eD!u#oN4y~&pV{c5DL|pIkSrA zkk8l+S5kbOR58nLZ%J$G`;7FctZZs|g&io#oxJdFIIO7yU#U|2JAi3#rVn$ zm3IBd_VD|Z=lfxh!`j2jBjNru{V;K4palIw*2M;{L*Yfem4w zi+BHydkWD(?<#?*e8zLnUna~%4J7wqLzb6g3>g**RS<(Cpx&^G)XCBL^@``egmR|+ za)y~;@gYK>k50FHHuM4km#oZr2^4;PM+F`bI&DpiZc4U3O;+1F36?UQ=9NN3Y?Tv6 z8a(RMW_@lz5cn7?CaPlLXobRN{28*H+k7a!q2DQqy zb8tFl6`Za$DVca1E%?%Dq$lotLqJ1S>G;M8R{=Bsl@Zg0%-$o7SvTrq%VPw7|DoyU zv?E~kWPH!7$?w6M2I&emO(?N$w6~R+B2f^k+B+uBukKFm_i<7D&HeHhg^ijt^}yGA zB3I!3JEjI31t8LLxmex17i(#ifc`#B46J!g>#2nk^K5`^$mqHr$+L*TJZD2#dNIgJ zs1%H68$#imH(xZv2Q!dz%s&q;TTj+M2tfLys+)U8cNjv)yoh6Bm%_j5<&=zP$QfRj zU*2@lc&iZ`2t%rzBbMvTcNYSv$nWv(cs;$m_v*7gjALcSBt2*x$Ls5#l2GLqhJpKc zY8t6E(8c)ap|4khXD5HW6YVJwaIn_O768ty@zSzq$zYF>D!t%md=$+YK?Ia0YLa5(-SXY}KzLuN?UjG&XyXNTks<5{4g4&<)72aV-eLX~Foi`a@^TZ! z9cW)b88inqM`~GSgj-W^uFo#Sy6>^W9Vx>LX%44*g#5#3@vWP$toeo^-%C#Md6gB!!$YQ+Rm|`zO(W+}MEV-SUgy0zGcmgiRf?S#f zambuv>_FEb%G&7iAh^PW!j*N|7V(EmqpiOji0*yWb4$6X92S1O?xT-wr%*|{6VbGdFL0=4 zOwt?8jZ877S=H{e+QxuN*KH^*;-6P?(mp};N*}1e@yls61mqUrCK?npV)+jA%o>P< zj#~SEZ@9qHwD`Bp#irskrvfMQBpcf#GxH<|2T^|hLV-(#?C~PfTclx0RIK!JT@TO- zUiWCZak-rTvUe8gMoom92g*APTwIJt#f=Sr_`rwTQ|L`*N3+w($XxVSGw247Pe`RC z5?JFfgR^2!?+YxctVdv9uUKEN53{^)R$*gS5?9cS@5?jK^P>rL)$7dz@=)MNVI-tq z`$159@p|IjYlo`OrnB~Ddqda2eAH*YW6=jE1x$0giEwv>e70sFvY!z0(4paIaT-r( zJp);v87LATgY!lk`|eSL`szdOe6yShi%)7sezjiP)FeBw^LUh%S8X~9;#Rqz*4Wx| z!AB$cn9hYeaav3nv5)m`v-tfl?`lZUn(W=Lq-$c1INDrf_EOMFM0LnI{&}z3#~aXwJN? 
zT@8F2SUZ3$j~}10La8fn)_eE!#?- z;O}>c?tXU)BdxH%TI%HpC$>7rPmT#adt9R~Mthi3W~5fHmXP~}D+2D?ISFp9_mv@M z!NqDE@@xEUILkYlhKn+qgt$|0s}VFU&{ub!&>ZcO)(7UT2|hK#<2##EJY7}B(Ji9v zn~z z>uz4J?-UCw8wXs8a_u~ZBoF8g_Q%l8EfBeHAzDmi_?d%5!k?mardWvZo%fEkg|)Q2 z5Tb}wZ9u3KB`X(M868vOq2Wa?+FY7;Efhq&sVAF9mMRHoZu$yxJGy4c!><%P@=I%co2PF~r>mn@ zzX2m#Vr*zuv^$m2<4B6FGOi#U3BUJH&2;FRfXKWY!N zq+0&UVORdcjSyz$ue!KxB)pbAQ%!dBeRkvPS2@2qW~An5;U&cDbxl*hP*__vN!?eM z0gjxtyW7>^`+G2faGAVhtum!q2FpTm;_*VWjmdQ?apR9pcK1fCo>nZQDAQ+oPx@v0o2CVBv@Pg)-+L3{1r^46Zw+$VXm($?XyAf5({G$Ka0JkN1#6GO?veE(ytF+?Z zFmu~TLt%K5xJOD$z$^F;yIXA1-PD4GTU!Hh)l&E;qoGA=#M^ljh1T31a?2d!M}~V{JJs5fwoTDQOJLO3_c5Eng+4VI&% zPm2mBi?7|`M$4N08n{r%up{7IS@sHDGzt@@=ghlqX!2SPI!bU|1qC6pO|~SL$SXR& zw>XXW247wenW!`8zY^9*bG?E4ii1(NC;*WqYy}#Hb8Um*i%V7I)KTu^3g zQ;gYK(Y=Iv!tYp7gTFx2Y=mh`z9kR^$26m+a4hyX)VqM6IH0t~o(dC6TEXYTGxX$L}RcaXm?uc2?_XmV3lhBkEpd@f5-Z zn_*p6{FM6wiD74+4>q}m^@}}c=iJHEx?15+XRv3`{`Zo$-dTLZ!@#_e5gxJwT+#es~R-6s9Vaw z3rmyA>5hCryV}QnBb-BgDx{XsRe*OdBE6=(2KIG@ya#s8A=ktd1>PpCkE6xR%O#E~ zIE2+bc5K|m?p~n12ucuiYO-eJ52@16pmkKxWRFCO&!jDVj^^Uo)kNc|wjc?j0=QYB zqd$sa`NsxvQAZ@tVmL3XzY9wvov%`_cH?rpSYtY#Udf6{t+1L3X<+W~Z>$|88$uh! z_V*5?(46(0)Ux;ErPUv)z>DU$nZ?=ZyU@Q9&2u5yRtUHKPJUJWPNj>5q3s}<4oyiP zO_H+-@f5y)+GRgaLA+P=)ZBZJ>fzm)ubKtWyX^a}aqj#ym3 z_Pl*6G_dpQQOgz2_FA$nQ|Rk%qLr8I{YCL9|6fj!I=V$TB1K-v5X>NzAQ=}%QQtJQ z78CO}MCDYC3o}!-k*M`^4?pvXf|!9P^1%LXK#&{*6w(jhNsOC0r+4J5mOw_`4dcUN zq}FTTJ28jobgpF6qWJ4wJ%MVk0xqW0b7jY32tT7dTseP)l(}W1xWTlZOL0Iom-%fq zm)0L$6c)n5cmiE^T}V8{dc-BmOaXl-6QQR8wzbh})(4B-u)x5D|Bv;ezDPZ1f{JxFdy*heBZT`m%~a1pux3% zOU7)1o7C85S5(`y27(J0?Vo5lk6=PX>htA}GS=!;1OK6o2{hD28 zBthtos+5Y4t5=MDn~b&@Wk;9Fho09ZciEEE<<~R&s@BubIYIajXeeZLC7DrCQNQjN zcQqyD9L6w=ey|#^hifa8d#N&8m61``yQj`Wwy=XjWwRR0`-4_@E?O_Ig3~oZJ#+&% zhbPnq7XWm@(g@g;lP(dqH-?wwdIv!m(R#L~B{O^mZ{R7(;Tyhfz$yvZF2f|nl$pX^ z&#Pz>njO@=ar5qxv)lW$-`kX)07mDngImBxW9T|g6is5#o3^&KS!Pa()t)xO;Sc|; zs1;lim~s*PHnq_->eUjolJu|*=BW!P#7jtK&+Lk?S1FQ<+P*8b?`g$ zM^<=|oT9S=#?Pos17XGBIIE;zE!2o`-TCD22jN{?%tE?Xi&mDW&E7JxKbnSj5&HiD zaytNC>Wia$*`NF}G6!aLTIy<>spk06eZVR!$!BT~z-6Of0QxH`B_9&>1>-XPN$wWy z*^14!WKp&`(qupT!b~|K_2KLMc%WtpNLg`Tud3N%{+m1R&lZ>a`{jvjN!9XXYXz z%{8{<6;wq8ykfIf=cXnmHFlu(`)I!Coc+-4u!dTqus!Z7xTBIxiKzgvLvz$mke!_0 z2lw+Bvv@>fo8xRM$m(7>&FYs@pNd%fpuBT0dhA8SD*zKh(p*pxlUN^AG*JDy$PoSd zd@uOp)~=sC^pVa$Axh;Ti$cE7S&ERS)SRar7|z3zobGPhFjnElNRz%UIRG(p9|?*J+vZEhEB>qOHP0kl%JphTTjiVAe@%l!jZvF z$vI8I0inO$@V+-W_g76EcM{L^6f^xB%$7OsK@Y^CL#tUDU!OZNo(&Sy>T94&*;9?= z^RHwjx|jGMZJWt|bO$Kxf%N?)KnIkXMhX6|)<2w#tszV%Q7kI)j)LR|;s>Od?%|?P zpD9AEuSi^DgX8p+QZlk)?=V|QQDFeCU`~zAHT5SX@!hQZM)cUs0Q=+{Ly4d9f3ES0lbwu&x?ZK4 zeA@c*2zDt&i9qNLB<7Dq2kZCx5dr-->*H68Of0S8eV>rDB2+tb9ok$N(Ke*@c54^WWhqe zSuHJuci3dTuQO6Qj?{STx><(|a;5M|gv{cT?HkPe?BtdBayR#3FMz7lw?uAhx~{L+ zh$3%0HSG76m}nl`*Qm%LnmmDdg}iP-5lexn&MO>{t#8EHrCT9@wuup`pVle_2q*JTYGHa9c$o_tfMy5~|;2B(%bODr1(q z=<2OsT0K4CguB65`Cx4y#=I<1A86sy%P{LQXdzYfYCtZV162tpF=h)3uhkox_dQj3 zPsTkl&flgrhB)6(M*s6?-NvK^mj zK)Kph+(>s9QvQ|$4^h)mS9c8bQ6636*A=F>+6@Z}X&^k48lo+>0D#_z{K-*EdQiHw z_^1mJJ@rsv#ZHOT*-6uPR@aUXg63IfkR7{dTh^7{_*dk&V+xku@Gf2i5|ZPC&0e@( zOgmIK_BQ6!*%w-)MGXLdPFkNz&hX6?TaC42HW_#8Ncw~*wMyqFtC;RLZGebgxM-5` ztkFy<=BP{rr{KIBV7E^V@}_Vlm6g%I9D3bk_TVWzr7X^bT}2K4+$swd^J}~Aka<_9 zMjU*94V{(;*zh2YkFL6(*Yc*hz$rh+8LZ9gU{tKuD%gN>i~K2_*!90VJYP@dHKtKk zFH5?BqKqPQKG4dB3KeK{&xH1v9lu5I-g3Nh`sWVJs?7&eaR!x0lPt|vfS7BczULku zqtkzWkq(vna1km-7RoZwk&Oog2NrPiAKH8PVNz-{v7xFhlTlcJ9C~@~{cj})gDxN+ ziUo`tc6`*^o~IKp(e>@6b4fh6hW<_i{{oc$cw4LW<&I2t^13l#Vv0N+I zRGosKY)J>p>F?wv2-C&ux3@wfod^hN!0+IBv$pnMv16nfY2Xz8^!Tg>tk?YhKHDYYgPP(xH%<+9Hcq7aw!aGl5qH|FM5>9BGtQGr#6_ 
zwgkCtInM?2oa29Qpwf1j=7J(G5D(aJkhx_vZrg1n7XP8{kiJK~(4I7_7*r27;wa(s zVp?72QSCARd!D*#a%v-x5DBQ3!0~enmcQoPcyLswZ<}5^`2j7Sj|h7hq+RUk6ysV! z^|BkilqaoCA9}n~o`6sd_$R1-CO%G9IB;mmw^4dn6dDku6u>1ePKmnUOP0w#Dmh<@ zuiN&l?+;-)`C8sXZ?B9Tl^ zVGj3w;K*!?rXeTbm-Iq3bz9C2i%IR{u!dl`K>Mm*%OuA`?4pOf7962EGj{#J4B7bu3<$GfDZu%;} zgIuZO@WiHIR0hAkgxhy)GgH5E#vRTw+AEl&b@b5=|MUuh>MggBga#t)!dE88gekTu zLP7q6u9i*t&qM5Koj| z;ml|h5cLVDtq(QWDvz(oNMQvk#(?6my~PlgiAamA%OMCmixI=G0FnKGG|B)u^SyY~ zt`~f6Bubp01VOBAIr5P6VNQ4*A8VkXdDtA>Y0<*n?KIxUam;Hjn}kM1bQ==5h(=so z3sg-jS)N&Y$o$N}EK$)>OzEf1E%jr~{JQLw$7+YuSz>C8|MR-nvEHFYR(l6bo;3nU z`p&vwY}V>bxME6AY}IW~FI~1Hze-!+FWvX~paR<))?y%xX@6j+0-+LiLKNJ!`1fLA zapEi}7`6s<(!@sa1vj=DuUJvpnE72pa!OZi{mm|6eApzL5YJS8p298@0IUP3NR&s)s>FxQ zFXa7W!;B4{D+Jgw_6v=F%-B@!ZPj^8D@D;n;o&~7k!8kfsoz*MI(!4Q|04duvSnN~ zcaLDZ3jpEM1F|~_c-7?lZ*hn4ff)xla)K9kan3t5Y07oeaspAjU5e?pR1*?(cfJsK z*x^{HT7Bj$h6G(cqPNb#CO+)9*gUCPP`C=>N|QFb!%br$3yumEw3DCJZ4~na)n7)K zFggl4uN-QjorPF)i-zpnnFakNGIaa~Oe`kqiVe4ilc zkOvYyZ}_1rfmb1US+~D`mVilGcRVlYw)F;e_|MbS#SLkZ){MB#zhU)!HH_+r3_5km zwL@i_$oj#Z$C;T`>4zEJSlSxv*4s9B9{s&C$&3Pfp+Uf8U$)*sk_*P3La;!UQh+pT z)iQHlrDe>T&$&h^deu6(yvr19(uS=lf7z6T&OF&-fF$WANm+5J+j4rLdLy{wn_8Up zwHXYfp!uO&=fUr2>i3m>j=5dZX4Z^x(??kwq1N(W6dOj)`@`7Ls@v;XdWsPIbd97le1FqD*0eZ@a$&RTUe7ZwSiL`YL8Vf_V7wZW}e;V%SB^yi8t)WTKZ`c*McboWfJrz#MPl+P^yrU1F3 z@7Z&STceq>GK6{_8Q((D#&X{|=K6@kR&2?S+Qqq11gn=V@|i_O`yN$dT;klU<4)J7 zV5Hrb8Z>0o6aN4ddX;1-?tqBmxq=T+C#culZj>wPpEQQaX%K@6X3jdo@Q=fn@ct89 z2quJV+7-~qq#>Lw^iK0-2ZJjdH2Rq@X(XNyR(Y|( zk;ij>5Ny}epX^jfu0{zfWAE2(JLx-l1z#N5*ZMwzhkvcTS728Ml&XO8amCaT#E@rY z*)~LXzOVTj5xnwvN&go5W4)GuL&1U1=<(AjZkGU$_|Y4!PV794O`Uns?ytn#g z207Io`F3<_(;e@7t=>x+AM~Uy*`#y8iCiSQOoo$=z)AH=4JArCz7`Fu9RxzdL;^T$k#^D^_BB z)qOu)RDY&-8}J9UlKm7YoA%ne!#BJ2F;<@4E-;*Hp5Q2GJqy07#)j4pyk_ZAiA&*Pn@kpH8dhi>uzLq)*in8AMC1xN3;tol4KL1MRY&+l+3$!sqgYEX^mh8zesJ>8n@Y*6;gW z>(#EJ>~>wG2sG%HXfB1DCE<;0t{TLwJ=DO{Ce{SRI|2&1TJt6E?QFskYAvUdVjvta zx2J68!qg^5i>k)ai5w4~cIR_k-JKBs}f#KSZ08vUedO<7R_d8-Zw!{KCf@>_6=6X|+{RLfP7qWY9%=r5( z;&4xRol=zcb1j1{bzu9SL1pch-h_~-2pv;M9=s5E6w}8_Xnb|*rZIQ9+b?A!(1&(w z82JgSw%w_!O&Gyh9EpzhK5{&8^=?Wje<4xZ5CnT82LXJTzvB6l>u)R_OIh!P#u3@0 zij;Xtswi(A5vux=Xs)4EWp=x*srr$DF0;pyYV1&d1azHtPki}j>zvBUvxzSM;BlQ* z+nXYMf`rC)e{hWf9YA~R_sK(MRaKDhS^y;|9vB%l2Qu3#&%V^gKG_U zG5VqVl2mG>D!zpinLSO z4bJZ(K}J_v8vlf)?FGmn5vgtXyh{dPbhr!-XEMl$RLfVrRL2HA?Kak}Dh5yFMBU;O zu2UF33dlEJ@+a5RD6hI+Wkk=sB4CO3xaWq}W4NsTt|kdCXv=9&uayl=&jXCnabZc< zn*?Hu`u-(*xIuX;7Fi`Kj@J0^A{L^^;=g${o|UArzrEiNK4;#W`9^yE9;J4-=m<#b zb0R~N(2wOkRlouGa;Wg#$;}x5=wS4*j=ZwytUiolG)=pZ?Rmk*+ zcvakk?(2O-tSy?8fn`~JF1yd+J8n8=ewmjMUymq>>MqSO zV5W&;F8Age<=rhgy_F^J?kT{*LgFwQ?3#B?zo8rVCp8Wb9nYn?j&O>Oz5?))cJ$6Q zM*O^ejUU`8GKXth{S6+qo@qlE_!N+{X(-`H97NR?zNx_NM4pFh_1|X4wHh`h1@jbD zcK0*b_SGw0-HKJtReCaQmqgdaa@JR(OTr5kG{Tn)#zQQDL`G7Mu2j_uZP2y(MK36< zDZyZCtmvfcJesZ(1n5h)bXF6jjk3~bWq03G}MD&>3Xv|Jbe?IqBGj+-Q1 zI{Q`fFj66vd0^B0&w+v4=4=4B%`jdEz!M2kLl3p*1 z39u%jK1K=e<*Jnc%io`Ix+Bj`A~F}!FBOumTCf6pG8tqxhOp4qP1LH(NX@3rqi?6C zog8Jq;r8ZTH*g5+5B4O_MmY|j0O;*axqpogT&te_NRJ}f5>OSst$4e&TMEd2yF6`p zlGD!u-2cFC_~JT&(%a#6-X{B;y+^~&IIF#QwS=+J?&wF0`e#_-1utp}i0Tdxy^kn6 z!_7ghg~Y~hYT*dMu2llCSsJ@oOa!fZ~L;DT6}5iE1RHtPYO1W5sJQsPc8r(Xg&614~xT zT1W0Vom1@C#JG2;bRYG0?&{XRpohMU7>PS*!|FhJd->av;7+%JP+voHRxXfu>U|!V z)92{x()}QT)cVC^Va*ADz18f;P~No(i9syf>9Pu7{88fOXUYL?ra^x9WCzk#xn`bN zYb;vHVQhO0D>6l-E0i?D@8^FYFR?%pTONgn+IhePm$6uzZ#@st=2EFwTpV!6j9-qF z0gfD1PQ@PbKBVVoyy(}KJ7GYuK{@8Dci9Bh8VX8=AM9?Lk|Z~9aUy}gls%yN>!~-Cdv@tSAUp) z3L{u|k*cK{!;T@sabo(VtKmJHm=udf*SVgwOqH?;2!B`MT$@iMf&BB+TgyX?*C?8J5aU9{8#v**y1+N<#Ze?#x9dbe(q$liNwqXu*Cg` 
zzRbjcQ|@u1GywO#e>_bZ^NQZ$Q~K!wv?f+9L!+C(oLRs5>pbjDgTn~wH&teQ&>I80 zyEHMttpXu*n7reokaI^@PR}kq2629SrdWP6LO89Ndq#85%S#Iprg^i60LH)|5p@@- zB|=4SOzltfxVcAk?1Y@veghP?Tya3|XF&#lioziNZtvu&!E;{dsvbaCu!xPbgT9>?`L`S1jJFmuG4!g}R}4)9=_T(7K8 z5|}$`+Z%t`h$=)N@vG4hU-gLYsJb_j5Z#V%JmD7f9MkU@PX0G4!0y|qWlk0 z%XO|-nVkE^7k7t?tK{v%87{@w?RfEJh6Pu?VZ;2m+xu@1ygWaV=*;(n{sZ>_ zn7I{z#DnFPm?t6`4!nBd4#aF-&jSXg3FUjjL$&mQj`*j^tiu3?Kmx?tQ6ZD*w?ICo zeGT~u2J)M7e%JqZo9SAV+9k;Fg~kJN4X{@d2 zM_@hq-1dTGoj%HyO`$l;#F3*OBcj(gVI++ZVdTP!@C~+i#d(nYcwp`jHJwA@rRaNJbzAF8aT8LnQ#C;W?oSju~quyYt-9poYnvwRR(zz;48 zPn#Flk4*QvG$C}R;8TIm;0I1@gRDbf$Z>wB;A3rpa``x zLjQAt$ynN4g{~SS+dW_>%8I<42ArWE-a(;|KG=F z`^>0o`FyKOBZ-(WEF%DPU1q{h8P38?4!tXj_wgct?|OMc1Rjey-il%1)=!28@2+;JVL+|rTh6vMp zW2{$?-c!ch12LyDqV5?p8R>?`!;uhau27*5E*;N~_qC<0Vh;X)o6UXRA%h80h1_I0 z#B1@+W=oA>R#T6PKU1AAnSWZdGE*{S0H8EIlq>W@9K=CVfMKG|H@Ge}In)@{?`1e# zAjqXEL20-eqrbF5`gkYB0q@}cKCZJRSDWI488j-YkM&tnH0>aybP#h2GW7k3X2{xTu=9zc98+x%7g7H!>L1E z4%-Sv1yChT?)2!lZ0d%+6q%MA$>%y2BhxZ#u5oM=5cEO6#<+Y|ZE6WG z6(4bVTY;);*=k#7beg*a6XreAWUOkJ-_Nwg1iC8K3x9Sw&f)p{$JHJ9G682J6?sOM zw9inZC6#3u2Pb;65O+(raMS1-ZJ!4kM%MCG3#9H)2gw#ou0GgXVHJz95VEV0BVVE& zm1d|x+0jAi<7r(7 z@6+abQLdQMWeKDN8bWM1fG}4y4t|j8^fI1TCDYQrTjE2x64^mfWxD$I_wxksDX!v> zfg9=rfzkt57?Ix@WPK0;=xhvtT{Z^(;QujE*~SK0A|$buROCl2c@uC4Wyci-u%%r> zNPnnwlCiN!dC*_<7lWFLdPZXV{KR^`m^nY8{QqwFj{#OEb<6B}41lY6Z9C`+zs|~Y zp`>FDUc>(DcJ^!Itf$%c*9s1{Seu3BuQqSM459ox5JX^sCK(js<_b)}+)NsNW$n(uY)h7Yd>ca1N~YMH+gmsCDML{P?is-C zRr*Jfe1y4e+&_25WTty>#+2@oK7 zfMCJhB@o;~aCdiicZcBa?(Xgm!QI^*8oN8^ykC9w-nxIFx~r>t(|h$|%{j*y&%=Oh zZ5_W(_S(i{*uW)Bo6nJB0{Py6V z^A?e%>IT0|WZGB#ggd^G@al8LpLL=(HWo zrsWYRoUZY*6jxR}^R`L1iC#&0SWi9nV6>a9kyfgx|HULe6tMv|TE|-sC&3mt2FRXJ zf158+{%|Zp{KadLTm77TBKxGNv2@<%6#boqOFrWTehh3FjOvQJ-=ICD6uef~M~5s@ zRpYf75>6`b3sO-K&}z{DbdgW0C-_2f9%-fO8+WR4r&QE5{D(mZi^qjr(nqhccw zS|xsyJ^jHzdUh2{_6F5Hx{3A}V&0FrpU5nD?RWE9k-jj()-?w|@oNnHv1fN~-ke){ z+=KVsUeQ2_M%Xubj&VHbqO=>mTIHPuEH3PtuIi3wAI&EaT3FrTd8;sIYTgH`v^_6~ z$PQ#KmvTcD!M=k9oj1Qw$bbIA?lm|GHmxx`tb0K|Tsg;QMc(FdGfrF$BoayTg1IbF z6g5NLD3g5LsDyM$>-635c+&^QKS2ITC7+Z1eN7uFw(I+*_6JbLGLhO0by=>MOQm!E z(uh(Pp?aknbMVEhs@HqP%jFq#iV5g70OsYDIdN=(j|bSy>2=?~7S%`lG)MHPM26aP z-=8!c^!IVbihfwli+C5-r7!oRth>1ycQ!4j&y`xjPK?yKA>po7+Otb}IdrkKdUXrv zqvNuoW&|DK)G1XZ{8B&OGeHq<%q#Pcs!xP)o(@$7{G`plyf{{eSyXCLM0!lRUau)3f)KOh{Cen8@E2N%MD4SU^2&~1{=nHZ0Vk+x9}Z36l&N`3j#OdRg>=FfVBh*-sVKDK~xm$z3>O)byn7 zsEcYVtHVswWex}lU_kk_R=H$Xh9oLiU%eft$f~{LFW;vaX(nvjj)uenH?8~&Vo$^V z_24A$5y*x6*!*~RShmHa=x}_AlE9J=jK6ccfqP!}3?9oH`szD;*gvPsOSc}H)(ft+ zm1G@)z(sK;4*l^7lSSKluoq?BoqjE~(_*v2Tdiq6nMKyJH(lwRt2uMnS&ZY&diCK% zsS&b2==FZ8E1AjGq4GQHpA*aM63JdCc!uy(a6vVRMUR=rTOfgMUsP(@rNpIq7sk|6 zGJ@2{IdzK7#yTVx(+!4nox5&M=K&!?KH_t_;TpYe{#bN!JHtN7ARH3W;9|9$yoT)B z5czwKS+kF=NUtwQj%^7oSOof)u8-P)9hME3m*vx-WECT%UhB_r9abfxu{3N?!3C4{ z6Bq92vZ}3n4)N z7v`8j2|XN7a`7S51R*(WIDykB;_7pI_`dw@U_W|x*eP^LKP7XoWr+F}ybo&y)|KJ@ z=q%C3uuQ%DrZ;i}x!f!hl&YqSFlgG~fc%3MY@ zAu^j*b1|@X=C?-Vo$VU6{nNC!1y^&o3?zWL^gh0Svhcv-awtMzy;uM%5;Hbwo@JbF zUQPBEP(Xk_HM_O$aW16&L!2Q1hW=u^g0252A1SFOTU}2Zc(xPwhsKx@akn|kBIYfC zxifG3X?te?bl@3i9l@;Y7^Vr_?`n|OFw!3vEVjv?f*g$rx9=QI=7|>BC!y51#RLgG zS&R(7T4v8w_{jCDkD3m2kn5e07b3Fh44Q2=1UDDH>p)vlt;^XUolf!jP@U0pDM+ou z4eXd*3{W5)=_qtVlXSeF*|%!rY67Q*ABqzrh;BCsW5`YY(2TmC-(FJ!9^g)q8E5F> z7y2j);^WX{(rmK6nj0J^dw|QJ$R_~_tj*Wmo~sK?dOA(NM|u}bCYjbEO{CK6c61t@ z54gkkJ6GfnZbwQf)uig8xoJy{6f%4ZVcoe=J@{}sp3dWj zgg%#I75#&*JU6%w4C`BG5#62IO@7zs zr{L8VDGfX(y~&$W7+hugi_Q8Xt{yrd-j}FZZ^ntxR9O8ZqXg5y>h8Lyuf9hbO;Jby zJOf8bfZUzjFY^LhtLOLDg|PiiduF>Ja8#NM=gj9`H!MC50ES<`IAn%Kg-~sF1jh4G 
zp@O>=B8kDq5U0iPB6_l?Gpy(<4TDaJ;-L0wNpCmV&TTg2QZD>q=*4(WQ?9WZ{?=*M zM#eCfK_t)`Z|_`Ee`~q1p(DpzTiC77Woix^EJs3*nFR}CDD=A>WgMc8%0!v^mYZvB z(j=iGPTv|La3vMT^M`>}7b-PL-Z^{QcUy05_Zxu$UvGG&B4SJ;_x}1?#6<+rOfEb)3+&!lJtUzC4ZZZBRj_wn+8HBBs{hGmO-) z=e>{Pkt?&ZVhHxMJXo!0ec;CUkh*Wn8oRD#tQ5-x6)97URf`Y%XWL(8yEHV-rS{;JeQfdu);EOY}s2x>J|#oTdkM2$^7H3apY>Jv zo{T@e!z0ZWi5D#izkdKXB!&@yXDmTl$r`3tj71@Bo<#%m1mKj?O$*C#QM(Jnr2o4c z=lQPyhQb#sDer~f^ViJW4wrQn9|Yn1;5J^(_6zBI47#45(-R*Q+ymw0p9~V@coiVy zNW)6;TuQ9*bV|}>Q``AGPm(*EO zN)j75I*{-IyTHEjMe}i<&i*kti@nq=nzF>zhpDje%yy3q|0ZlOUvY4b`1I!8p0K3$;; z$~yuAlNax0|Fn{7IvGW2?1jp}xY!1pT68U}@S>pLZ3Siora-!E>ji#xe7%(aa8&jB z3dH(J&=k&bmZlk{w!Hyk&sgb^4Tdetk9G`Xd+%!L^6|<{6$!j8DH0cd|K)=V z!&~0HACU4nA5)3Z=UM2j$6%?y2O+f>5=)uDHN=+X*Kc3kpIP*#cjIf*p122bXQYjJ zs9n%fG7uE|W&J|wo%9&490LkFM}4@!YR+KKtO-Hh;A>T49w%6cvQv?#fV4a~+R*+9 zvvvMA82eW8H&x0>9Ra-ApsVyAr-A1^%u%GF|cL4K6`%{ z^EjXP#@kzRIl#1okP<&FGW6gZaX7Pb;cg|e;!_u}r+NiKHvLGh(P8wz<Yy{k6X_4n_K2?rwC>Pigshz$k?h*{I z^%nTLo=2=EwaG!pPmbHot_hPJC+hJ6p8PbPh?a^JiYYzc*5Ew(K00V2ydqr0g!Z=sCk%GPhmz`SsP?BG}Aq-SiTO(euR;lVkmb zIbg8N?h5U5TO)sevo3=cKVc)otUh?uXyUK+|gT8UoNdGND>d;)fl6$_a` zvQZ-hM|&nO%P$)A=?LVKF52tIlMHM2c_=!?(W=ML{Y_?zwzCOM$tc9h);gN*qZ$#h zK+S?R_Uhx6{sl|O*d46bG#GU>uFU3K~!yH9hydTUJ~U~tN9cQfbuH2Gs2nX z%MYePxq@D@IAeot)C1G#9ucQ(Cz3fW1GeAW6i><8YV+?!NsEeGV{ejE>|?ZbO1mZH?TnsHcMzMQ)5m3vWTvlTX< zauQL3xB;z`($4vu<-J(Q@J4<(B4O|a0 zPOpP*)o31vqzSV3(%#^WH8#eL=40K(fax-XB&_&%sC}>0W~xs6DzC2$7yU32l-06y z^O9eVt9^WYs{hbc=W(nc(0M2wby97puR2{6GJ94Qr(Od_?Cz*4PFrjlwS^^!CY+C# zTiG=SR!GQ5Nxc;k4Rrh~65qN|yo#qLgI*9^y62lJBE#Q|Ce3b3S}u8Wgx9$0)n|(8 z;f|+(dm|2=5YB>3%vlzBK%ii{8-ixgK#X8Q&}GFOJ;xC7b7&8d=XcnVcRHFw$>vR+_kf^U5}-cjqDuQ-&x5&H=t4_Wly z@Hr~HOZtky1nIC_1O3XU_1C|JmnHDHgWYpZnd}cAE-Rwy)&IF@E?o2FW-DJOUV8Zt z3-=w^3q&CZYdUi$fh4~e@v2mHBJhzlx#LSv58s20EFtkNS!VNjPs54Rd%@aO=$Ctj zP2eC3<+AE@!euvOMBP+OJh-c40Icmb{b?QfU_(kAtVlN+yYQPT34IG;1*AJ8nqIe< zM*V2MvgnP0^Tgg^Q=lia>UgY+U6W-UavYnjqn@;}JQ3RQhkiNLM;~Au$eqHcl^|DFk8CdX3;lGRTz~_;;=ehKh z8eb8R=Ix{&yPWjfq9RU-lP`NVx4s zig;C@i{kA$=t!T)3Trd{giotB0~Bu6}=j%6MJUntcl~Ut>;^ zMzND?i<6{nXY|RtReiU+S;N40GsHW31uLfV7xFHTN+7&riImY7@)~lYi#3Y}XiDDz zN=Epw|N8N`iqJ&une9M{(C!H=63aTOLpX=|G(x;k1voC@Z(70NuXso}NLEtfhWb=C z)$Tr8{d7k}8ukj-b=IRbV$LX~!CiM$0y3WiE9$H16_!mcs6)pHoIk!yeYQEC`hxij z7GjQ}G1j+qDXHSAGPT_I0PaoxJQ%J7F;r8x9#`{+-21IzP?tl~EIb!#!|(;knQ#^a zDA=3YSr<5x2+DEDi~Tx$M0>%n*}UG@fFBmm_-yw3S%c`INdK70R#1*6EH}y}cvJ*A z_;qwPQm=;4IIQwpfFwjN9qNNk9)0xaxCNxBWo{LI>&RJ6pDA~RwrO@5Tx3O=uZVSa zA7>IBug&{u+*Fpkkoz%I2X@i(QaIO-Gwn%HKQ8(+XYU%AwmeERms(l;1$|_*5P5N; z)3)4~E?s1niqiRRZI&$DfhQG zp_ptMJCF5^j-|;54`QJx^FvBEHGQ8rBG)H#N1p%G8fn5!Zn5g4*<4pJ#nh0n4taN> z@S&rp7cCL*FC&tA!u!zNxA`&JC7U3EXo{p;I=fVdxv-^$&8H&-vTsGXQz{?A^!rm{ zf8A2rfXy3|wi^sLLvh5bVeO#2^`}q50b*VM?#RDDUpwYAUWqVUur4v0uh9$XcJHJgKV!rk3(ihOSW+NAsy7b7OGcLItP$?v{Vb4F}ze%_j8Y z?&Un}`vASElw;~(!cd2}W!HDf>0Y_iW8iJ^v?FRyy2GVS=;tA`=&faLYv2h2>x3vu zGu_TMz_u*0U|RQ2VUZj%0(#4$%0$pB#Y!<7+7QgC6w-hR`T-qSIPeQbc^Bts^G7nr zsd4t{xAY{DBCJ(20c0SH$UGazcFqIYK^_=-r$u-r`dW4VuMYGZKIFOwFqg&xJR|Hu zfRDofnA%SHNDnQq_dBAzY4V*?tp(4Gha98P)qPLdL zy-7=>{XA@FALbp$uQd#m5Qk?t=4711r@?AbXeT41?a2}Br2+@E_kknS3 zqu$>;P8nS2q$2<3NH*ZrXwJ=?T6eK-b~VQ^&PM;9)w|m*#rn^S{}*uZX~Lgz8UW-0 zbFBaVkPFs90(gx7|1)#Mz!1B?Dzwe{QmGJ^1-C@yKPbaQ0wkUkom`oJ07EZInGCM^ zM$1*>sZ2#WjNfW%Tsqx80RaJ{`&GH+<>USR1_NQ331jonn#24buW^}~GF2lZBgdOv z-t&;Q!V?2Qfq}M`b~ZK)%*;Z$Cs7WqX8SYU{O?cxbwPF5ev{*>uM35ji6j3mQf}-C zw|YxlQDK49%t&Xx0xrAR{%|rK$-&cPVZQskmE_G_kJPN2o`A9r&R*z9Jih-irOxgc~E>eCvXd~%%I1h&0 z{u-e$Zc>%G*=)-RIc=%U3vKw7wWdW)+0sL^!z`_DD=8EP9uUM|=gB4f!k@G&K*JZY 
zM^}M=c^OjnmB!y8MWu}H<*a{Opi>OrM`#Tg4LpHRETBj*GnJg;ny>87>4M=}EL=(Y z@8@A}bJYt!slRSbjNPWdI*os3V;dbrkx@wlvbUieS`O6OLzwi9IoSE1OW7o2E5c~L zQkR6Bm`4RZ&aQvA8~GCp!c-Q&Hl~(_#~wr3_wug5-O_qtpQ6gjN(a-;l>)OGMacQ- zg@psD?+!MYy^8E~)pmq--kv6tQGnR5Wqj1|KZ`;Q3YI2eOmRnjSU9&5rq%4Zn;zC` zUJq-iK>K71KSWDAVU^V)chM$S4Y)@-7W+)@n=4;TV7dVOCj(4sXb}ALd32Q)P2To^ zSHQKXqRxzLY#Cp6!L1Epfz1q-uV(Yt$p*v>So2c8I02bSZ={yol~zSfanbpkV=xeP zljkz!&xSNJdGdej_owBK`JxJI=>RTlk;aQsLq5LCUox8n+1(6(rB?ihAH`cw$GFX! zeTAcjPFqKtBy0q%TiDmMkeV%!++SXqW`6xY(47AlFVB%}dKF<*h%jDwk4aB~I*^ZY zRqmG;NOj;^P^2q7il?GkLqRSy0yNmhRJ%`ZzKfhFdSLyn>9jnAyL^=H312iTp$Ien z(q%kEuA=_U_BnHCKPl|{mp3J@kKCPYZY#GrRs>1$Crzb0UZwHNZ!E4ueO%6(8OV2& zL;E(ujHoz_r3obVV@~{Z{4ec0bIib-Zsk;UT7W?k*re?z4ERO9dqSZirz$)39O@L0 z70ZNqsU#*V53+v8X>cL%RI|OD06=+u&QPCk-Vvl})#v>l+h%f+94xd{_uhU3W*rZ@ zJci1NvNOztTjHa^@Z{hu4(ggO3G!jm>8y@`zE|}xGx|q}OU6X# z21sa0px`454U9+6{6u4CSgf^lIySzbk}`bXJq-$=t2_9rvzek&|iUH zcZtSqt=!Q9{}X5$lE;x(R!ZDumu6$qWbu2OK7f84AdnJs6>r3fr9DRFmED9ESk@!s zr_egIgJ80}qrZ7`91<(Bdu zZ69`*eWg2$&^ih`X(8&!d@Uzq?g+&*Y5toRz0q_z=Q>z>PYfGf9xh?AnzP&PsI@pc zJRc2(_1r%zdsQyh-Kd8w{VFBswCS}_Be}RVo5DUiGSIZwn`Y;QcD)bIxHuMDxRCCm zW_wJ~@&v=3*n^NopF`>W%)9Rs+pUd{8yXS)Ih**iSDv3#a#OMA&?NjJJ>7jc=q1bh zaov^4Y}A67YPq^kw8>)CSl9ex=;IyRiGyH&`rbMfWD3LeN;Q*QFCZq44Zmf#{9f*@ z)&Q_@5WNL;J+CXvO})4mGu%aG~K@v2wvpYnWOpaL9uuT4@fCjlWh^uec1 z3xrv~9wz;`x$l(&MOMz?&?OyA2WVgec*)TM4Q3i6oHSe8GQ4pfpt!NJ_X^ytwkfX` zY|bZ5m5V?Y+O>yR5<&i=h6n%XYA?CT3!!Nj(}X5|r@V1R*LhANH|tI8vlrR*RtG0pC?}4X53s=xCt* zXiYhM)$b$V>#U#=k_I*+{D6{)wk215-La70pA}LB?2AHBxGV zAdX5i06SGD#1j1?@LRfqK4q(_SeB2C@uH!8QsR`P8ztCw?0LX`ON zN2BGX(>pX)jh9GwrIV(c%lcD5?h(OtQYlOa>5Gbk+}*ao%aSrwoL|oRC=nE3pe(>K zDLU%-cwE#50CTW#;k}uoc+nfmGKt@a;{LKgspr{kG@j03x|S=sIszioj2FA(gaCOzaQ_f_CVC?H8h#WZ@ZO+ zI%SKz&IRN7sJb61ffcLHDmCYKA| z|L5-?(B0B4Hl|Pg$L+m(qC|Y6Y}D>a=gLg|K}fR<4NEEaP|Cgi&ywhq1$`d$*O3To z&aIk5GBw6b=#!h>xQ|ckP4N5G)@V$QGiGOAC3~bsh+BA__zi)5PD@+NKH3a#tgIOd zmSS;#r&y*_;|Ky1?gbGke+BW8#IaKV&F`**r#t{h$II-Zt$;R@(pX%T{ki=1|+tvE-mldG(*pp#IbLHi<}Eko{$mU0k2O zUA*-A2)>vJW!xd=apN@_l-&u@OC^licSwgNKfnAOM^M4OMaR06UH;lVM*<4#(mmz;)7t#BVk{Y zP?%=`k>Rk77EbBds`kl!Jq+__Cuq_FLjrU`Fk|LUM+;+bz2Q{qjSpLB3IzNm(nFO- zi0rvg;u*2PJX-7|RrjrRwng$`&0;-gHlmvs1->Z#w@eymtMf58g>UHH*jSyy`s zG#0IH5;&UCgIXdFV z0(OA1-I3oF0s09DaQ*`}RF$@mn|kS-i=kSYq;hBeRM+H~r=h(|?~iqxyTs5l? zQMey+*UU8 zr_K}!x9$TMzlDGL)oHv5{^Mmz7rAlc+t0Cah})qhRTRHJOl5+78SJq1HjlUKt~6zS z5sewJ@}1`(&(DFLkwdE{(fHwa^QchXnDxjCNE0Uf>jw96SpwmvUnmm)3>$yD0xui- z`a|VpUi4NcDe0nv;I4X{YQJ_KG(FH(*ZOd{9;1bSpFaF0VkWJ|;kweEj=Rd$n3p}d zqt^290)w5r{&;ENeC@Uy*3h0LTIM0bo>y+V(!L2G+WD;$M~BFd7RYpfDmbs3EsH9F zohf)PsfBQisO|3}@%9=Z7G)hUH@DevcQkib-L(bGNf6gM&b{0u<>k@(4Hl5+t%riE zG8sul8EXzC_Vbe%Uf~TPJd#oCNvVNMOUUz>?Se(PN;zc|CcPbN?jQl`{+d7+(bk@0CDH)A?Okblc#+QQAjymYJ+{yjXXgb9I z6LT2~vmR=14(xU$^P1;(A26M4L)}`;JmFch%_~kt!*|`YKT(Cb-vchL0my7i1dx+q zM!+o%0^&&ef}$P6HXxM=+{(o4dmR`J2}sq08N(MQ?xHrHqomY!smrtJ^7LD;UH<*A zftfw1Lwp8A&*n0>@l(n9ma!*ZF{UCej_qI zL)}J|nm2&7+;6sTWDxUf)-htqmq(8C@~&tSK=efBH71xHPG$B}O*? 
[... base85-encoded binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/toolbox/openpcdet/docs/open_mmlab.png b/toolbox/openpcdet/docs/open_mmlab.png
new file mode 100644
index 0000000000000000000000000000000000000000..f09edbcd2221822e06c4c0bb3a1d57fef70bf3ab
GIT binary patch
literal 262300

[... base85-encoded binary patch data omitted ...]
z7fcRPVYsXIYd+Vgmn8Pjcnna8L18%vA!qz!QO}R^vQV!IRtFNA-#-2KD@(J=OESLO zSz27zPNyXjY)V%1Yt#7Ho=0kcG@eNoh+c&2$#?%EYj9V!Fm=e>eEM_MK`2pHBS6`O zjjJ!kFyS_^XH|VQO+H~D7Sy{8n0tnhJ9aKobh@|6ZWYl!&sz>TqAH=SJ0EJ**aat# zredw3W?aYG$X1#2($mG$T8m@fyzNI1sk>1VGjegt6;Dn5e=XAiC2dF*Xzuzj;O%Vb zi$hwA<(IJHJ6RD_&fIAk6}7H+=gxYEcR0BvkJ@}ghD?51_czIUq*C^t9iZ1p*(M$9 z+O1Zfduvs6(O zd)fBee(>&xOJ8*xUry8rM4;&(0YnQ}VHDX;<&};0P3sM3qFNH^ zWP2lT!T2;x=JO8;w|+P8Yj^4%g=9~hqe#`iP5uAV`M+E)0@5~qD9o?eQv)t2<)aERzBjfgHC0zVxhMYi@$B~z^mo1n3zOc5 z>h@XSn<0MOyB@WudS=B?A zIR)o_0heBCgOHxk+m2Ni^A}6(h(dR00Pd%YRRH!qDEzbg=?`PD9JV4Ak_u=koNnMP zccA=Hb?)Lc9URN(Sw0&3vuXf#^7=aV7Ocq3sJTwSrtEF5RyjtKIZ?sr!4sy_4?Ve5 z6}i!*yKgU&uTbOguh_0A{78j5qopy*VpOp7=Pe}U)o8Kmh5eLf?Se}ZNK8ufakF1g z7KsKu1vIi=JWqpjdCg`c&$+{*hO<#G zygxNBJ;tjavFz-1{hDxl{ew}e-DiFv_j?ra7&%cPQSi><8Hqqb#<-bo(0;=GQr{d` z?Ah3cj&j6MnNC?^S+T|A`3dt_6JP?sjty--jv_3y>5dBsjPrCk!bO3Xy8cFcq z*8S~kG9umJ)_@~1hVVm84kjLs2fpefKP4*Ff<}cCUyvIHnU12q#$wfhJTI9#ee}G? zeS-w5uv}|}(HU^q=u+Ii&N_U$O7nFXKXjChWffC1Zc(lFllvTf9b{WEvbY~3RNylq z*>3CxO5e1o+4DU()Jid&Br8(=@vUZB@JE7Z>6Vz};cHcpGvY@<1v`V|eOAswx4O*$ zyc3fg)BBvK0TnXe?bm6Eb?y*NZyjn5jG3YqHVP{1Uylnw(o5b}(hhKzV_FW+t!B^h z7snQe#io4`ncL+Gov8L$_k%8GZ0L_MOz@p4X_rI|$j9zDIz`!sE>`rwT_c2s`{Cho zPUA@TLs9rbx19~;fXOE&X)EWZc$sl~HcwZ9Q17D`P1$0Ss#!g{eo=q_RiNc&OW)-ZlMZ4VXqH@@`Oktt+tSggb-6`pUvyp-R*; z%pE;RiNSI{7o%pxmriLbvCR@?dwsG;oWw?)_89pFOVofzrP+AfE_6`WfaY-&p_Y52 zS0#3eTc+iBL=HHjxX{Hc<2s9qu55dA-SRjOPrCL^2-)PlE*h*Dc%xM2P17gUHrc2K z{r9a~%2^BCxhMW5{^}khItJgMzt(l}~ornKbj{ z)61vDIgfI zfR+&)s9a=fHZT)1c`YF1Mk0-GKPoRJJToFdO3%$@+i4Kh@X!O;mEm|ddaYwHCU+08{06g zTI! zG&!8(N?yA*@q>c`WNDJb6?mAjd*$2S>)-$sXvP3cNi${XV_GOiKzzd-atv z&#OWErBrU9Wec{2^L@ejZSSpwt`u|JyM^;*MgBDD(d@6v)4$3T<;o_$mU>choGgV? zuAMrBa(WjjV`XpJaL&3|jXVqwrMuQ`{`Qb!YP6v!L|iw^M&)B$a0-{%BI#S^==;kK zF}hY7=bO*0Ri#!uS0kQ#h};5+Tqp7|Y|w?Q9?WvSeA9QmHqM1rm!XL~k?%AjNFjhj+#n;$q;(v1zLdVAIFvghdL!$yB8vI}*- zq)X}(De;Z*yrTei^ar;ASq z5nmF1rUx;#iwi|$JXf_kdLt6JO2tPGPwlw)+!Vv+`l zzit{<4#$hS9mS@jKPw^-Ci_x*agbr?i^Hln`io$)Dm%wU=yYh}En*!yqFsY&{2r5Z zK9xu_rX~Z%hCSKP(H@DG4RXTqq0VQ}Z0FUWD*RWf3$t?Nsq-?2$xvtj{BcbEjU(*w z+evgA?YOAc5$6+=@7^3%!rny%2zb+(&$q;tB?|0$ayH3hQ05gkG~?NbszayMo0NMC zykF$>!r1lro2+fFX;ant)hP$yTr8j5D%KSs@_#mPoz-HxP6pB_Tc6GKC*&#oC{NiZ zEpU><{5wGU2co`f4L~is38$+XYRB~gh#pnn3itl}(bW*I2x7C}T8$6Wkv6(No;XJr zx|*Q%#P4}>pT_eX^YxOamgIV=@h?t|8@&pn=TB8HXkGEb=H#_^`VRno2lXnwRb{$1 zeRUXchj~p;{Xov>$Xd90(i&SvMVNQQsP9wosnM|;g<<5cO+hB-?}Iner&2>i9`I1R z`y6%~n+v9j3Exgm_LcrD8tRyMK~q!8hx1X;&Sh{HAKd?td}Nuz0?b>A!(4T<{)8@8w3jgrS3^ceGwk zXTV)1o$ILrwC@kYy~;~4A0=pQSS2GaKI~H8aOF2bZ$Hv5_X%nPQqQ9mq(=hp+K(R5 zGLFt9RoTb__o3dql;htX;aCDvlxWK(g<%MXIi=72dMOuC#v54_J?X!G9FBELe}lVu z7s}3GQSjnCIP$ z0Ks`JSm5O%If+j95R7=EAbq@;^*%I42I;=MA9k~k1Etb!>l==;HV5;twr=7=INOuv z`Hoc8BgmZsf24?gPj^*JtpP+cH#=ocutC=hKR^dIBj@&_&ay-l2p5F!zQ*4Cem)3z zlCq%m#31dt*SJ9Dcrqo+uMw{I8Ic~iBov_!LoI&CADX)xr_qG?XjfLk16(dK7tt(c zK6-VNIZ>*oa%R-VNh2}y#@SC1?PCcJQH{{yX9C_|aE0&Ns+AU-G5xfp0Om~i0%o#u zSeV+Sslz|S+@{#%k+RxaWx!3Xqrv@@N12XK7p@I*6Bjj}GFW140~J_^}oDHaFtdCysk~d*TH~dA3pm zNT`M(FST7GhoNy__0g8ptVX%M2S-JT*3&x@w{mVh{DeJ9`@nLGx&HL3+7^UrlQ$~= zn0ez#n=it|yErRvYVsr}`6}R~glCFjB>mkisC zABCK4mRbUP(~bb0iEsEHFuMoL$3ma};>DubKSIx8GV~DwZ|gArSH?#u_44oh3rGcB zl!B95lmN`h4SillA1vc?WKKq=!Mcn77!g(xK;-G-`F<#P@mTfZ9Q}o@Nqe6<$35Kj z6}!D)eVD}y*?e30!{u!k5Wr8+!fO))SMG9f^a11w-)OXRoddfKV?)Ke_-APV;lV?& z;Z*x|0m42UL~Zhu)p?lHos@9^kXk7|v9mFf=u@DJ83xq7V@bijR>$6YIJwq6v8-?({MSMIsTMZ{)@(c=XhSrFU59e&bXg1hjfQMa8RNWR#h)j*GRS?FOi@S zA)k~q8I$M{sy&h{=EQQm<{RY{j8nLdvH9AXww)4~WP5g<+SaX>5_4h|Gfp?VuhH-A z4gZ{Oq_ml2hp46%T~#Gj@Az(-A$B#mbSsqyO<(1)6$Mi-U)(*o2k(jSPpz%|DZui$ 
zz^x(ud@PluCg7*fBT;9r`r>>Mw~2D_^v9FiSo_S+SC65Bw%i4guG5NqVRMAOS?QhP z-RZ4z;pdb)Hpt*tdq&9)HD^CU<(lwAnJ2SI!E-sb1=fHvx!Sx-fl4Yn+c(YYO+mX! z73A^t8>Mh|RMPTMVPu_##Tj|Q==6ij=`!Xd;fvt6m7(4R03RFyfA440PusZ!k<7FJ z>)8Tde;da-M=zp((i@q-s?9Xj0jhpjsL=lU3ciM9sSHv$_LTcdb`q0#Kl9Z80cmVj z$nQL7#w~&e{50PFp!mJN_=g9Du@bSJEnh*b;44ot>VxX;Iyqq-G+_o0lPW@~+~8lA z(v)0Ay;ZxDY@bhOC)R|bp2NjzYzSLou&?pMIvqNZOb|Xx#J8CdtG@9kdgJq_RzlRR zd#(;gDnACc(T zv6}`YR@^FmA6nThcD%4qSJ*{sGq*ztkh#fR8Z%)jYT$95sPNWhP>_1;si&e$rpxSv z>}huNJT{^OU+Ozoi%$Y=TOdHVr1LGGX=Ch2uJm+GsrLrDjIYxSXJ1)w>0b*;+I77Q z-GCJNiAunO&x&R69k3hb9?Xdh_+u4@*)ps5Nkdmz;BDx$SAnZ8 zRIi%Fx(3Cj7nIWO816p^!gQFNT}Mb+dTRHs29;1RwP*$k0UQl>N*a1V^L0rlm57sD`5K*ozMIH+CcFDt z4KC4TD}aV<0U();L!}_L$MQMIOQq#wH@@nP<{K;VFIy^UJ19RHh zf;jsipi7vYL!Ap;lk9hVy4Bf?kGjLMGn<*(!^H`|*tTQ&_! zt_%eackijW($q@MxK`g`vs59Gak+=Maofy+nvceuh~-eB;23?G-)0YP`Q`bZGHcvU zK9#QIyXv!BkMu`g8rG5+&(mCr4`PUI)nR*saAzwI%9oXVGO7I7?o<`jZL*K}o@Wl) zG#>Z8J9xJ(lK^EUh^GIl7sk!2DaZvJ>sXC-W{O`$>typqo9bUBV;ZeE4f}G<7?$vhTXDoAo#6vq{(KGKku^f7m76}eT89BKn3E71QAs3Vpw80Hi zgF<7okuBfyUwt~35<{7D^IL-#D(Q;`@Q$lY>08m~MG9>2Z|}gHgOGFX$o~OW*3pf>1_4E_{(acHYJv}pmbBS$ zbCAb-(Rlbgf*>8_+-nN|TMOXA6W0zx&Lb)pG1j;W{I41`J*s;p_M2AjFQ{4daV*VY zv*zawmYDjDFZ211xf^fV4EBZHO;=+Wq*gLzldJ$X(2z7xwgBUPqSjUt0IIFx9QeLB z^W{@x5dKiX?W_%A1sx?}IO?*Wt+Lr^7;cm5f8$@ExznJEh`kG0tJMq{H7&wP{Ms0W zAvc0(yJG48C^0Z6;c zBp`8qvZUD^%Ky?7V5}&5<8X5`8b}G;7FtS44Yx=FLEcz5I>YHdDecC0X8Lk|bDXL^ zw*=3?1o7#a*K$7k96@&(vqEM&v(I}>?JF0<>`qj%xS5#6tq~R4^y;P~-E9kHcdrf& z0%^!XAPo;lv=8BOXj~eG>MCfw+Z_+4_{j>bU*`&1znBkNyg0ysqk(95L5alVBu-ZU z*;$7E(Vfp-3I&ust#B6l*yWm+PVV)?)B5}{U}RReIT%UTR*;}}yuJ3|pGqkKBtPxsQxz2;d1EKrLP0=2qj3R4yn98T zJxhi=)?AN|rIf_2_RN&Zke8=Pmy2EL))ch~eS=Yfda?_w)mIrm)vwZI7CvFHDGh;fzjI>SC`a);IHSRE^!y-NwIegL1 z1je!Y8}8&&G7qsn$$XMCy#4CbkluS{IP~0qzW#%)*KS9^RUuPN$rAFEVNPl}VXO`C zty*GH`dvBL5w?1reUY|PX+ij8CjI*AM#wRz8(V4R!jPOC(@_*NLz$j6qAphMNv+KL zR;$vH=hVUSN9g2NPv5J^6IN>dmTj`}+arN&R+_1tSq^venniK4xHQI;4@naF7q03%wp>~)#<#DQVy2R&I*u!YH~n@xv9p0iBuw(N z)J6e`pY>7oGuFA8**Y|1Qvm1OCnbpkE9{M|*!&$6`7lVYhVua6quz&G9*o-&#u z;Jxq%{=8AWDkag$`=`f8cvRL=(Km$7AW|L66Fz_Ar6#awo0;;{U2XM=3}? zD_|9xX&w3yO>TdHry|S;uv1N7wnnt~;*q>;mZF&4u{h`rZh!?d5iAQ(`}T5hQnKyK z;?3ME57w%GEuc8$9!t_ap?~$<-9RHVL

Y@@>6bfI0Cc|4gt;5?L1F{P~-kWR@%y zJUUym5g_|4y?l%t6JJ_}qm|7#TB64zR)}!X=i%VyDii#pWSEj-Hm^~yFwxT$hfm#Z z>c`Sz)P;KyOQ$gu-}rM8EvNH~_0Y4^VTRs`Lgvc_qi2B`*!a-2z2B1AO=6h#Uzh(y9ePnayZzE_#) zfDKKYxnz4aQa0lLgJP{~wum>C?&<5zyz}Jx5x&1_iRLPj4Wa7B9$$ z=j$UT_B1tbU;I^?#J!)bS6HyCvD}dROO`1NY)%VN$at-JkL+ z@o}X+Bb7icAGfljt#9I2thM221EceV!=7z3>q7Gj4V z-gxSeThVRg8M86e{H*wbvKT!}ua@&ik8@&Ay>xN?{~m*Dv%l`X673FIym-77>y%Rh z_bYH$^ehRJMbdiTuRT_K(YRIinzMv*o9n3m>sM3u+Q1_RP?ua#`tCNo*Z3%Z)9Uj~ z>pf}}N1Yh(z*uG1y`Zh1Hof;zLe_0Y1f|39MbeNxE*TDr@TSIO`5)ivT%$7?{Q~cy ze0D~CwL}fgT5RWdPGgf93F)o+U{1*eR3Ms|Xoc;uRumuK^a5F-5Is7X>thBs!+gc< zKpnDOyQs5_kmIdT!H|t^$ zHeKAc1=oTrMF`HFI2BL9UWw=zWZvPyxifYjM?z9VQ-_|(s0*5GtG zwSLe(vLzx_^-!MAPxg(d57(#IRCs{kKI%w@78kEQxgdA?B5w7`_>YX?(Im+W0~;>) zelW|Ymx!bM&|hK!*^Z^(9wKgsO=#;(1O-s`UFw3>kkg2V?Z z$%n_%A}Rm1Rt?h=p;``QPw$(mH^$lOPbN*5K1<2DLXk+#L~h$FqVogoVHaY~CMMR+ zID9=8^7#8=^Hw)QsL;p`k>3@{mj6w5|GgUgh(rO@8_y^VY_9OI$bM zde|%5s3Sz{^W&tpF$icBy*lc7!{+LTT$skZ5GxP+f{*~v7DH(!{`}^XD`YP&*yXZ# zkn^vUcb;e0ch}(d1TqbVEJLMz0Dh+L=TGR^&L@$&XOSS-{CR!=)M!?OvR`q>4sCbV z6NR~%m_8lp^H3Jb;qc(tzwFM(^qfR~At%VKNVy$1(OYQQ$sF1?Y>`>xdjYvut?dqo zOc{hp538Ie{ly;nWR7y3P^>G7<(5=Qc>bJ|O(Ofr90#!ac(&3>t}U4C3eX@w${DYv@9(ox^YzPk1Y4Z;jiQ;5s<~8^WPh$ zYr&X#KTtjkS6E2|w|1dfx~sk_$auN9sY6wx&q;Oh<~EF}JwI2ejMFLc#k|lL=GE*7 zsaqMHiVlmcz3f!wfCl%k(#naY0zdnNn2%#!(!=YdP1qVg#0r-Dn*9Fbfh`+-PH@!2 zG3Z%&HU+q&zP;mKcHS%1a`~p?B#~~Xd*67S*}V55wI5-fW}9j)-apOD>?%j7TS!Jc zxOc;~quC~H`X%StfD>k34hnS-esz0r#OU07GEepbhcyONzTNsln9l@^j5ibr2N-t( zFG8YSGtcV@B@5d&Z6Oqc_|@!yTLIzc5domB+WM)yj*Jfv1K`XD+~#F>gbd4pdbf+3 zCAtCx-g_1mV7wxzHIV9)j43^K*0fyP^sBo!&T)AZw9eP5$4RXl1!bC)LTh%k&Z6q) zv8y*8JjoKaLtnybT0I43k>WQ#9}^30qd1SLn>KxrgZntcJ}t9H{WW~ zaR*;&4EjmrVdKk_>Xp^MWVdktk;gW>^M%h8o`ubHLp>FB9))Ul;?kAzuXku}dKA^o zj%TnrHKs~AC-lvu-Z0bVAKyYmyl*_suWZ^(Pt8y(?YaXl99h6*`<}7uUr}+$`R228 zQ1FT8j{I@dE_gs4#D#N=Vv@2<4;h47q|ToT7Q^Hi@M_%YcVF|~v|el=bXA)sGYKoHHkwJ!ELmzc!WRUqbG$p+zQtzC z;mDjV{VX$UIW${VUFEf1y(k3fCSe|^qpc!Y7R9h+K7a$15()~>MUd|z~%^}%O z304bYW%-aTh~B*4ZbvnqNa4f1nRwO_R3z|7vZurinl9V&A^8gb;q_!j?OC=9yZY|= z{ah{sdj-iTn^(Vt0+izPDZsi=xIgzFBH*e3g^14s(^bVtga%8*EoNV{*XZv12V7fZZ&2 zp_ju50%n6SY$w+d!)GBFK(tk(ZKF?wCEs?i-^!f>d;O=rBua~6J4&9h~bVq{{Fx2y4wHIQvn z;^!+03U-}Kmep_Xg;TY9Em_nnZGC&dWTN1+tA!x9*llMc^l7Le@v|@o*ER^KIx88* zZ+b=GPW`d(Yq8$D#hisFGFistFJ0Pex5;>oHDnfNbIkk!IRMxGnpG4mg}?qUbuCbe`b0GoyNs5*SatwQ{Q}tr z3_cznChX{NeZL5In}6L691Bl(`q1^aWtP1LTp$FM{l0pRcN{EN z6}qetH7u!MP^}Bud{o7EEC@Mo^XXOh*a{(4wbofda%Jji&(`*v8P>DsujQ-vD%3A&vjNGPPHlo2=yPaF*CyG% zR;2d6435A1`qRymHt}ZvfKE-T@ACnR?I~k8??yMlhV`omz}s;b$RcL(A^{ng67*mW!3JFncOu&FlN%qb;zu=8m)QtA9i@`w)Z`De_?GvlJ@=i zv?P+0&yHLnA-h!883KzJ$~xh*Y)LM-ZG2`|eKfB^%QbO7S`p`lJ@JSXJf8_Ah9~W$ z!{*^VC%kM0nxv=BJ35yFENWwTVr?FXzHMY))L|nC+Y8r01-&h@3}`FKNpejZNP}zN zT?**(2KeO`c$5cdI}3&^9wmyq(q&@iknNe@wKq`qKSb6hRi^HUtC)&zwuhSaxA9qu z37C)?6v)WFJpKr?(F0LZTPO85g(6lCZXWiWR9S$6hR;>$a7Es1q3)?~vr-GLgt}g> zg+u{RZ=~?Y$t}XW^!su?)5El0hJ>x7K8GZ9J;tH~E?e_rAynHS^hd2D!!{cT`B*m4 zU|?>Tk|^UP%R?mZA^WO4gUf!otE!?x>xJCT651CL|7!E&cCb4ZK- z;YPaE{Z);pFBW8vu0pdp*0k=nGTo4U>kHNU#E-m+uMN>BJ4k*+jG;q*L!&iIA7K>px}nuvu1V1h3>SvR^Q6BZD-~Kr#@tSgDG+) z0ls=P|7ug6r13g-#p(uJV3EX3i{hFzukZG{62CTu9`Z=Q;b4>9TEzW!sb5ZfIb*PQ zwel6}X=#XaQas$?6BQB5_5DQ4>!+qAk!6vrCC3_JV9&g_mz?M{5#RRq>%WX+DQVf7 z+uC%fgxRB$d1x?42Bg6v?^?ZE_9F?MI_6EK)WV~7(}@4a)?0=}*>3N{3J4+y2r8n4 zlG4%=Lw8CyNFy*HoiihXA|c%!(p>}6N;^pR&|O343^Tm*?B}=t@8jP4(|ug?56^|ZdHLf3E4df)b4SG| z7xq2%{WH}Z)R4En;$2aFh`k73r(<9qJ$$09LGzSh(^k0i<`YxtAn>$NA#(MCm{bGWOd?$i5 z`}BflF>Z(}u+Na)oh)o~@B5F=Zv&-C{G@*t=7TJM$uY#U9OeI8j`3ReZnPMxnXcI# zWp%Ss3kQ%&KrP)fl>oM_M 
zU@zt;EL0L1yiL(~Hs`6IJE5eQ1*)<>Q~F?O{1cOO1Dx_zdsQ76>xvi4zQ8EgJ9$}i zo5e`8vWQ6(-}M{x_o!ZuX?cz5k#X*-LJ_jF7F+&lEq~;>=h1KFUS%ZyxN?QNG9{L})!-#LTS!B}7UCtWX2 zA#24$;5~B^HMGq2Zz$v-hy<@z|7bD9dbs%VLEad_OU><0Vu8ud7tiRb>Xf>3=8tPG z7`Gj7-ouyKjYu{Q6}+fyCc5UKPZe;u`mE~^O|EyHuvDevMK17byZr)|-OaL&LUDI1 z#g6}1PW**JFW$qN^`t!eamF8B{P*GV^MlJn8uNKfp*O@2)i-}3`a+KWqpY@F#`yEE zx;H)-xLNH&1U79J)>(ib&-S+(`VUY0o~f@yZz`KZ()B!eW9KOPl}D7c$h{Ve(l4Dr zgOUSOL}vLPNP4P2{T?xPse0FT2T;jqmuGn@W?XHs7@3rz)mt23kSVD1YZ|LtsJ!LW z^wnoC;adXm!xk^$+H(0*@wOt5iAz}f#L+sB5M-_FjURXn2NY&~ByvRg6Oc*rj_JD! z=*qk9^@O?fY?c(&HTT^&K9AA>ljXf{188nlN8l3ZxD9`HBJg-`u=lAUgTGwfgUsIi z(ev~7K5v@^cx5DJ==`wEny?3dg;`tw5u(fnkgQA05qO%&T4>d=oDZL{imNr=ugzbg zI3s`Eix*gW&deIANtL|~`iW2Z!~a1=3kE)AQ5iUfnU^GeL2t=Yf|-qr(WC;wW~#n1 zi5!Oy4&A!Mo4R9aOb0m0e%oNfvv(V>+6NlMH zSEcA5j4{wn`&(13JD(yMu(k6o?Jv7-LqZOeU#S1WLH=RX*9~_EBnCtpa0fd6R}{tS zreLI4ZAMNOs_VQNGZ_LkZwr^1y9)68TFR$_E+0ASF3kN`p0_Bqv<}>r&mOa`U~|hsL}U zP}94QUz+;8r)gQ$TJ&U+AK88zT_R1dR*q(Rl=2*`sp~%g4Qc{V4-mKhPGL9t+D_6C z5efXtZLLEcYb?OqJA(9*hPxJN`5#ugT4e9(=m_fYk7(AWJ02Bz8U_ij^l5r84(X8R zHC*<+;V@BKghMph5<~lIqJL?M%Ga#F6-&X4I)c{Z?^Cc!Ek!)FMaM0 z>~4?>=ZgE%{alhS{l7f4qxNgZsdFy7I7QEYTYz*0Y+m+90p zi*;`&iB^|^_QjW0BTDimSUCl#-gjCD+!MVV4kghq@>J|ot_p_TswsNqty(F_U*QWU zypD0?DKqMIGkA{gtqe__=-KJBfZn@jq9tRPL{VCo(f>(1;3PWFMd9$*$|X;0_a@oE zQz59PR0DiWVUo~n3o+Gi2xEU2o_e55ib*h`EIb1~1HreqW-0OahI)QI1+6OUyS1r~ z0fQ5$y=oEh_SzgAd?d_nWqGnj?oc|mrz|wrkG*=vtg{@5;%u5l(8YvWtQ6Hx3Bu*S z$C|>6pU~JTxQISUzN8`*H=&LN%XmKf&xH7@d;p}9){ zbUra-oK$9Ce{=G#VD$(df)VYlCjj~Sqb}BJRX%PzCC&#}I#+DAI%fS2Wco7=stm#E zk513X?n(T!6#YMtugwB);pDqNRHAH%1B6{x7q8xWwxXCq-1RG5SNdnaL~#5M)K5^9 zlX<|BBizyK+El?|JDnEK8NI^*pZ7CxwQDBN6070Ia6)fbe)Z^Cf`Jh*1AOl9UP94XNUoVV$c_EsOZ_{?Luc1WOY>_v#J)>^1D)Um| z5PSm$FcYbKKmAPyG1Z{{^4e^Liv?PEvDSfeH-zATkQ(}qXiPclEPBrVWW8YG()bL!Qh?%z;M_G&+ zCnq25r|8RF=}(?Q?3M_%sts~)YkA1FQjb3#-lOX%dhuTp{@u$T=*S=1e~AL3;sQt9 z>7)n~RJ(<_RNW&8kbC~ScIw64W%yY&BdfGd)}<@Gte+?J1tg4zb4;^LwX9A($ZW;C z`7bAi_-9;F=q3u2v3)c#+?5tTkyGu zN9%h%dTzIH;Ruh~K7U;KC^tFNa=&dz+Dm=QXbOK--A zq17+e7lX2A92@gk9lWhId7(hn%zk9&FdzMduCu`=K%*sNN&GOzzI_X>b5-~*ZQS8Q z3Pvx7G-DxxuHf#HxvR*TiGgvIwI*w~9Dl!%6R~;~B6)Uh#gh+NOge=+@Gv9EN!6z<%PG(Qf^2qRA+JP_!P2WeH0kx>||Xhb1L|7}$TByyt*?>HfuO z468oBk1Ahc`j~P4J?Ycjc~z&GG?i#)+AL}!hLdpj4H1Moq+X+EQEc0H(FJmOXly;p z?Z4PB!}lm|MIPVRv~KbGc+;h_b;B3IIbkUG7L3ufeV$HHJf?_TzlS;??#uY>}Bcez5BJtrUO#Z$w{l6}eFv$7&3&k1oK9H~-`(x+C)dCn@<{a!>7Y6KarO9F zN{`D|4`MQFTk>6)j>M*hYr_-k!6=XBy|PP-8D^o@M;6!uF6`0bBweb*O13l>imw>% zg6|sO=Zr3yCF_{xsvk{G8+l*Fi?15@i#5~S1b;Yail#E0;&c=TJ|ZnQ4r;fl*1TnO zFBRg;4@ImjUA)4aSH4z@yB;~|Op^WO+MmpA(HB8&-sM9sp<~?lduPwSeDeqqQ1`y% z`TGsB#{Kds5v)@PIyO$uf)Ttkg{Mv04Qqxf6JG5JrdPJM<+|Gu2qPJ*QtOT1uhxrw zYc~>CreD&)NI1lbH*Mob%R4nRQ&Bxk7tXw_`uL{tlE49*QcpJI1pyv?$a;Y7c#UUToU zC!NCc8QHMn8r=$J-v$T9uFWyE3-ROW^JB_2bNgp~+8(GKhA5Y;>tCJViUjL%T=$w9 z+C3Y(s-h1l&1UUTJ*ou8 z`pBe*?xQ^n51lrQ9E{!*X*aMj z60vK-kt8D+m0iWDorvPSH&X^t`PmkvEd}G3-`LH*7W(v&_r-iVlJI<1jlZ%#7DsBG zT^AVRy4a%i89~};aLY)cZp^%lmor7Kx&?J|%@ae8H~1W9s{KqYvMDKIcF(XZ#4N4M zw4A8m=2LiW&-i&N)aQU^nDH}*HSs8aOu^*Fv}Nm8nZ4YQmuALU3!{OhcZ|h_BivlK z>vHS5lalkisF|fEJS{oi2t1~|7JfoMeDny#P2(v$s`=Q#lydx>7I;Gdx(SGhBBC^E zN8|`v^&snx{2v%TB|io`-fjxhEuabq^i94!j--yiU0rMJlPw1Kt0}dmesD;IST$x2h8JtM!kJ!6_XwLNxh68SjFw=oN%jKwl%idEGuoi-}BFN0Zb*ukfw ziJ{RiCf@3Ves78`^+MIzX>)`;&S!PzzUd*vrAV2c-ZzMT2H1HQM0}KB(hskiXpWMk zGy*e^H<4iG16iAdL$4mx>>3-E{jo;@)ze0bfW_lEACo3fGz zeJ-oIIB4nQ;<3bq_NR{f$UX8QFWSXVEIz{tibg{&$-{EN;`(aD+jUi>gO@;d0xB3i zJ(^WrPdW6I`83vIU-6^k-iS_ftXXtgwXAZ^DIv`>MopDlzAV<9dLaU-6Nzk}Y)d&i zH&f>h;EW!G15L5+oRp88MoQUjQFJMzy_XNW`33ztKJ|7jf*B+4oAq@x7CedCpn7!_ 
zohDchQXb~XewzXfFZcSQ%LN-DA8H#Q5KZn1*I|k&R>^#9!$E!8OI!cR!qa`oxL@^Z zNol&lO0K&16V5NsyuVzT5l5m2i>WORcuzir)SK&AV0LR&GdJrWN1vR!WEkSo!z0hU zcDtdo73ew}Ray@R(}~QLTYI=y`HMVDH2RYVse@fQmaXbG_ntleoYM{~w2IfOO=?@0 zr=!7pIZ@GCi#)S@y5+<=9h=G369|BBY$iR^MZJo6*#{t?dL-Tu%;`l9&#n9nAyL|Y zG}PW`iW;Sif4^_{Z z#IuSzC9lZ|A*hun(d-=##9Lo%IUek4u~%^15Ufx7{3&dSxSW(j;ht^OoAmA|N*ry_Zo+GZ2tRVLxpV=K&!+OV+NbVu|@ z7QeEXN(SMBqJCiDN%o!w@@S~1=5&O{uF8gYz@mz#hH&TILsih3`}xVa$y>95V6jeI z)vZ5rCXKMaD%5Yo!T;T-zZ{fXS_q+oW4hX}2hJhaqliIM;qWSrC&OMnU!H~Ha9vP$^QPs-;8_okI#KDGp!uDvGE#;%k7=z1#=zd88tiEfT{1oFCQ`qBDa3j>{N?~ zym7#gChFB^2xMx;xEebfuS*^;v52k?@^0+`S6h0c2+jt)o6p{9yeJ6IBXylJ^IF$j z1B!dCkU*n*DOD#fwe0FgvJ8Idg&Gy)FCVOvc)j}6hADBY-m^~<8jDf++rW$G%xa#K zIzu*-31L&89S&MkHrEUN9<6B8NXQo2 z>B<*}Or-0QPRLI2hf9;K>wyMdgK6@#;qt}Y?%)?{BddH!asu;42`|GYa7l$idH zE4l>R6WsXD#@F>nGX3}Aa!~q1*T`bi)d{>EO}SW${z|&oBVk=hk7V3@u0x)t$8w~| zamB6la+Ju!KCN&5dr4k8wX7?1h#aYu)|;eL?gp1eG&}RUmk8PA7ZPrOU+Ct%i^a0V zQOh}Lv~TYFjNU!r2?0Sd?XYw{si6~v61HlaL zu!mSCZ-qIbsmxdHwsCUeuOHvjOtX~$rpvr45ar4l=hbGAKb}#tIqH}~4mX!4qAw{` zIQiP%au~0?2v=)1M=Bx~z(D{Xxr#MmgxhM6s6mlIHU*^twRLp4xkvHxX2@T_tKb8l zv0d}p))gD>kQQ2DR zae=H`c6HTBYWryX+B}}?eCcKqn`mX!nj(D%XAg%S0akRg5F<>;Ka5!@*>R(B`0%>w ze|3FD@dDFMzvta-Rn9k}qa<#Q5DN;l+HOU$`oxQ&^sySsFIAgk&*-HpRkieYnssCO z*_XMFM=8}#2Q@rujG<~W5V-i|Rb|s4!sGe-Rhl!gonpP%VZsq`fqU{#Qr#C*Kf+|l zYh&LuSHI#;7I9R64v)hD-3V_P#}R7Z_Nm|iIlZI(+4QP^lIcr983M%9O;%tA+)kVH zntK2VvkBj1jT*|y34u%`+>(!!;Edwyk1Xx9ARU+G>@9K1KKtT(3TzA5ic~MvG5!{$ z)*|`*Wknm-);Z>4D;XLB6UI);?UxZbr`0YX;{83JmKuSxzDr8=tv+dA>A#{ySX8Ff z{KUq#-fE>!6??CZ6r!d?9Qd%zJi!mqS3dW#BSmhhcfqUN$}Y@5(w}Dhy@9Bigc*KW zXol*EUw86m)`Xc%y_hm(=!mG{1(Mviy!w1-VZ!3I)8Xxz2-t9m)8G4eFc>kx8i z<&FsIWX)gPrSUrbZ%XuTv=C4<9c6Yvf2_56j#iX8-HZ@m*nQpP&tj{^1Ss zz((Q|TsWuqRH3ILnNCj;`4TRUN37afsveFqB2T9=xXk56|L-&!)w)lk-SRByCF@*j zyPIEmKd+3>iDPLJV6FtGy{cb^%F01QZJOsYwP&MW06!9wnmYU|@!?7a| z++8RM;vKNj@UD!gpKS4(3o_ibK#=D80~EdO`>cL>!jig2KLv8sp|d93$h>gmhwdEPKqp5>F>2;%2Mg70egZc-47D5PGvlWD z8$;8yxO_Lt@>;L$(E>LE*5cl`nhjEtpzl(`NZ^^PfCdZS^pakPN~+mO`>Q}R&TAes zP1T1J;A-8QQ8aJQ)8wZRqWF)7z?_vIX2J$MLwq%yIQfcqdD^O>la);>i98I@JkFqD zgzT{>D2ArgcPnb3u27zCq>Z}w&1@xGZxJf2n4fKb#IxmY{})d$@#lqjfwf_Op#j<< zgHHO}t?wpZQe+B4a?A)OcGa4LNj{r2`c(UvQJSfeJBbG9Y1)akm{q|=J4*C56iq${li zla5xiS`DXP5%;nQKOb&(*n?s6UqP!56u&_i z2eT4`_iVpu>fobj#rK*BW`X-$Op$%BUeCPIN^p6Z;$W;gIvmYPD*`pAx)(Q})HU^R z>Nom39lus401teY83L^oyH3V^E!O7uS*$YL59}DWH8aT<-0r4UP%iY@r@~(FjmmVt z!p6tu@~EZ?*Dy6-L*8!LrB^r11Wq@TK-_6Fq@1zK!w+p~ke$Rb&tG(_ues>S=B90~ zG1|Y@Z#opl>uid!>MNpe=A`G0DpCsG&LuvuK&OWz-;SbeZg?u!%g2Ni&$T`4vt zz`QE6qX=m%Jd6LpXLZ-Swe{MDyQHu6mdEPbr}3Ewwl%6P()Qy`sb>Npk7))wV55sD z$PvKd+qxayA0Lyy!RcBtk>0eHD((O?b4}v#fJJ2J*&A`5&M=H5q{oAP%*}KcK%BpL z$<2&7n9JCnXT+NG%s59Cs@(^-06qf}K_%B%S$XVL`66nR-Q@H8bF$>`>%QH%whM;? 
z&9)AI0tD$Ae^pHZ;!_c9|GdZl1p>X-#dB`ISVpuXQ9>>^S3_VQ)EP$~Rpbu0ma{`7!H+o1)Focd=*7PbAoGV!%;^ln>LoH5v*9nK^eihhrxX zYl{rZ7DG=u8?9BFOvJo*y0x>su2|bCdM`h>$DWtBgEc33zLL}}4!XoTR+*N<;RZTwJ&GpRwy?&+*^XxClsh`~)aV(r9 zcbv%52%Z#tp3)Z8i?lqPrQBtZ+fG*S_IYBT3e55T2%Qvm+XFP&Itc>dEw=JlrJ(p65OA8BrKd@m`F{Idi<5KtGbWvjk8c3QmJBewzEIG+q`zU>qb>>qQF zjoaMXG#iR@zd!v!bzZy2lVn_+JbBu03qHLi^>SvE&RjccL^G(u5xY+>l5S=m37izWaf3GrHh*+gJeLS{nxXUU z9m30z^!8ujc5{D3C8#S8u}r9seE@Mvu-=zi8lZoVHm)=s^R;c9f~DTnI#XFz-Zxs= zt<=A>`%}M?2oihOcC6U$eiY$uE%dFI)V%-R=%+<-(u&q3y$FOpa>1PE^W4^f2g$LV zb%us2g$yEl1tqC|N1(-K%(~w`>j&j^9??l_h;OAHhmB1Adb7)S2Y-q9RML+nU^si_ zj2Y(RW^sbs#)G$9JI4^1hv`kZW}QI$;I-s`+|Y4%#V)XeNB--6mL$U}(gXG?DxD#R zbl7Ywfm>o}jW0ACcD<1m9KFLHcPoy6_K0Kg-hB7I~jlla1bjllR+`CF9y-ou?&%I>}D#ri+PLl=+}A|4#-CZ=56&GfG^tx~~^;f?De ziS)$9ICAnxN4(?*<<~jlP0=Bm*GGK;SF2udHhF)9#7{fs8+bKA=Ln6K2^EgZ zPmJ4cY;1Th{?mwq95~d`e~uZr`!}NG9@eqgxa|&y98WnLgjg$bsFj4ih;wtRiC81G z*;y&WFn(KVM#yTjz=nBRPcwQ}^yVjC5enOt8{LwSsr{-Hl+ywv4M0>Q^1QdB<$qz} z(Ih%Q4^(}nn2r)`o^OTn-kIu^KI%*-MQ&Je7@7TX3)3C6qqJb86{?soY}~#PMeqe1 zQE`eiSepsb@?Rfy9uqwE z;xf8^u*6^>e2FUt~_nvOG`?Wcz>xN6!OZuG}W12vE7j2UCqs*)KBqcG|X zDJogZ1`)Hl>jbIW$&z=UIVZOJZA6!n`e#biM8ve`=f-3Wo1YsmiXb3PmP0(3x5o*C@vU}Z=yWqMCm^7zay7w)OWynfp!{T(~Ma=OlLE9+S8z0s-G3wUu z4s?|Jj&!N5AnVY|S1};#H@mJrE{_Azh(+W=lAh%EEMGjI%#%e{ZB)w19C|Os@jvDl zKe&B!vFJ-}(DU%Wu?Kbe4|rpA{ue9_630YY)3n-{K34h>HeLQcosYAGPBBZuS9|G_ zEcmkg^K5dp_XU*pL}RQ?nF^!N?jK+77_~qfRirH*?l3~VSh-kO+|vTm{yhuem$!I3 zp8Am4a;-`xt{?L?`48ZPn3aSbz&%s6Wf}G8c5}1>#6wB_c+D|${3K*^$S8RPEK8k{ zBDL?&`_8#v{Yd5g#^Ugl1$A&!d<*ebB}G;)CF<3Ue7m)#6K+Z3!?vWG*rJ&6#@P%3 zU@yiC`b1`|M}w0t*V1IY*5~qRxGAjR)@hT>xZ9KL4sBN#XN5N9DYq44HK9t##}yz6 zbXd^-;B;Pd5u^J;W!$FEM3bZmI>D2|GbxuQ%%`-qVimCG)p{avetr_g6Tt~_%O_q3 zE0bNW(xJ`}=LB@3Oe5*#uMCnTkZBc&Pr9i6GS@^?L|(*pM=4cj=EdUlJnbOLx%~Hcrh5AC1_e3&Gyc6?-MDLH8eYsHtkD@6&$k5yC_IcDq)*OXMhGk43jQyYAa1W4=T#lCs`vhDjD_6pKD z%%f!1A#xn|HwANand{Wb(-Ov+XJU|zAjtOcP*!38L==d|!9b(Q6L7=9Xr5~kvOn_7 z%DNv6r%g=a5CPom0k`R#l*(i4@MNDQKrKY~1t>tm#^+Rbj@7dhBF=JRWV=Zqv)o6e zr5~NT0bBJ-o-pzU2O@;GbeR7UbUpfc&HO1au>*_VZ&RwrB2@thc=cZzDIYKbWY z%C028W)$F#3FncNC^ol-(h^T6`RzfoS7zY?V(BUmOqCC0u$RcsmJDQ;Z{dV{U~H#xm&$rahRLFo?}3i*`^ zl%J0*77l-q0@Wzj?8?T?=4ta=SJb&L#l&klED{#f2((4#d|l@WD`i_rsRq&^uk}Nc zk%p2>%&nTXnJgMNYbQay+?3fv?LWmCo#hTTqXznZyJz(RPGf|LYd2JXCm@p+^epd(E3{aNrIUDys+JUT{@t#%Q44`AF&INZ{6yO17AG zY4&si$^jLptyTy4I%3sByYf1jSG);&y^;hBcv~CLKe|Hsd(sQZcf$;IO+b1)KRodk z^AN!i4$?HafI50>UgS>9nP-VF&Vci@asUf(1#^{Lz` zh_BLP>tHK?!L2;J?kqZQ21QgSR|&l!3a^j2h#sXf#ZHVkbr^i+L=s|gmtcm_#sT#+ z3%ix&-62Ckjj`X>9`~-u2Oz3Cz^k7us{uX$*a4whzVfa9!RvVa{8hV*$eYo7e~0Ej zf;eAzhX%S<19)@Mdr%fks~~(J0Lr~ zR@Dya)rnH|&Mf6ZVythw4f|n)sX}j$Tzg7T5F~eQ&a6@GHUdC63 z0ehBZ4G602*9UE>lMxB!nTCqzPlJ#0W=%od(ALRW2?P_a+BLHK&Gu3$6Ez9}IISenJ7+&H?dk0dMIKJ*=OHe(K+h74+%6^Q{5{h< zLYAD!&G?G-(_}aZZuSf&4z}hI=%B_elo~HngJt>cx*M5mt05HpnAOy2xleFn;ecwx z^O~GWxR+yLNyx*&d<8vy(Mny)x`}r?(-9p}dZU*t{Avi3PR!T&e|gv6#%;WZbB2m8 zeI@)Z`y4`jJDjjiH$(F>`2N0 zWN4{0!c*AhbXxrV>Av$n8)mN+z)b{&o-u3;AJ)g2poV(jmqYX!Sg z%hn}qRhq2U^(l!^i2oi#x2C#i$tc}*6#?!8dm@RgVUzyPPP1IWwvF!!nd}H+JLRdw za<~IW4fl2!A`KtyieR9(M~)gGO(vqyR8B44$g#SbUzfcVeXNCd1AFVu!26K|8k(|p zEDtx%&iqP=VdZ+Cp8g!$ee>`<$57vlV#kMuA2iF<`;@iqx5emg6-N}92r+5A9ZyF* zZfr7ckB*cyJ^1p(91&Zs<{H|YC42l~>8#1?pwmzumu#m>Rq#1~umnQBbg%6xx zfc`E-slHn%AU_L2+1q+}svEOgy`qAqL$@VKDG9UOwP(JJf_N3l{mu`WKNF_1|NIB! 
z`2G$K#s$q@^a3yN4;TOI(2*j&l-S(Xgzg%eUrAPyoT$9K5`Z-12(*48A-qM7vvT!O z*iDVy*!m0Gl`O>CK8~!Ek(&6?X}O0J#bb|M_6pX0xLv;}m{7!Xqe?!(v{n*GmdOut#&?Qnj13kJ4w%nwHOeHJHd)|#(KcT8kWb|d|rDbNn52o zw3Nt9O}&`5AP%%J8GUtX5hVi5bXOQV5$I|afSq9mL#DJ$w zJ5{>dvy#q*xVP0(v;Go2UKyI)K!whp?tKUBR!`x(FQO=EQ}z5A9KAJG$5hIM?0a{= zofG$!ckVo+I+x<#HdcEaIi5qe$AwZm5%Hov+b$$rbbC@W4=S^{S!#E3m;C&(?Sist zSGoY0!^yo@7k5Cqlmko1&4mQ+u&`yk)Mb(bPIo)L1XC z4mPsTOZSpM7D<9$K5?)YS8(KA#XyaR!V92*ZZ=%!Bs;nQDfKqecK#p7)6uU2JZXq- zzwxS8r?#oaQZWp74-RX^Ph;hVnwr%7Ndlic{Gf#~dhEV`wNlgfq>`hisUK^J5wH`yOQk-#rkdRF8U_aO@e%-G^P^f@OB&;(HoiMN^;V&qL5h z<~6H}iNAZ+_Gsk#&@&1j3O#*M-OY61a~a{h2PQX@j$bW4nfr{IxyWn!lVcoPs~)$2 zqqT#)D3Q?P;05BbU5d#*-~Rdvph4yN2P6M8B_=xAnDlq$=Ax?X#-u&_TANGe5&7IA z1QvQ?qWSNXr~M<=$;yS`>8DZm4?u3lM>jWnt%kKOy}k<#?-f5X+#;cu45JSXOx%;j zW5iLVo}v7v%cajWn53A%t(9QBDfs=D>Au$YKFaqcT1q8agOYkqu*cdpx8B4u!EzaA znL|TyzxZaVXD*(!xL}z~_$wcRt3nzs3`?6Azh~{6q0-OJuWn%bVJz1HyriooJn4sY zsae}K7eW#@p$L$0=#*YT+w;4p$ZgN1cB0eN#^+Y)7Q{F~9nW9g{l7=L>|4mDd#re? z)LXJAc=&;TKHNWc+?@K~IHUG8GWF^b@Z3I&D!pWBc|t(QaAQznMD>=1K!j%`b3vtoyi{|P#e18?z-C^9I0tHQ;m1&F^Aif&;tp~T%ho8 z?`MHi!6-Vh>u;M$ghso;Q0DRij@_RWy(VV?lh+|@-JzI*c1JyCj>7Nl zO{@nO%^FHXGg$fV!pn_VrWRd!W=O@q4_D4A>3o#_@Cgj|qjmFW#E!+4V;%|@ zCqFX(c2)>STz9m3T4URni-r#KBs>Agjd?fwtE4>0 z-Lfk(Pk#cqWkRp1E&-9wK>2@`{Jq)kzky}miXE-^Kko7M3V>5OFNlNl#D6nBQ?wAxpUr`%SSn{Pa!g;S@3SbtXo^xjkyTOxj0{9C!eVa6yl|}eU~=OIQiUcMBjTCI6;73tpQ#rnmp$iFe)ooo7IdTe|`WQcpKRRA1$B$^P~9pw;_QCJ{}-~-&~d-Fuz~ej+;4Bfe46R z;m#O7H}J3F$W^-22o%mQRW8tpl_cl(>MwZ|(ggaU|4wus%E(5>=m?+eRQ)DH)UK$Q zG`i4o5!gGHf-)z>S@b=fK@?M(3va_5Wlr=QWxuwXo(#mkwM|i(GLZcA6rkZoLoLtyYyF5Jgfr z4PU&46_7d>0n?~FBglw(r`hS48}C1AFM+o+Y36n)RG!8>BCMO8l#cY_h^l|8vR&f% z_!0dURAQ<}Lb_FAl|ndt)&=%C(SYpzH6yIq6#U-nblvMWF7%iEDFD;eI{xmpUI@a= zYTaN-yGrQ|KPU^HBGp0h6VZf7ixTs<{VAdmSjBSiefQjGu70w0Zo(GQIG%%DOyy8p z%kfbaCdUN9%{KL~IIZgaK|xaKBLO$vTQ{W0SZ_EB&6!u6@|v(~ABBpY#fF1UNcbbL zg37JNgHG?mFBYI<;^2g!z@v+6H); z>@an>sL1`QLG_Gl?!BGlpQe@WfDF+w{->w^%kQyT)|<`)u($tm!Zo*jRJI!c3|j=U z`16XJVMJf(_CGJmpxz4BVvJL-5i^IqH>x04WO)K*_1A4^1>8ya78GaBm~S*FzhnDU zi7jD6A3$IeDcOobpw2w?4Y>n270M|dsF%@IR@V2)H+eJtIMBEd@(5EufP|DtW(w3j)Z z_*QP?BxU7RSFQ4{C~XmwU7$lAg>}Ox1#*vy8qes}@CeP|xQ>fVu30Us+8ux}H z`%>N-z6RSG5%35+fqY~|_pvbdt1x@XkrMF{%uXOc4!oGb)J5*dA=kA<l@4k z#Ba9qbWsj(AGD=pOaL09bf*vFo>#StOz}H0ip1K!m*Oc zxj@Q%PND*xA@kaIOHFs9G`x;rQOF%q>^v)aXLbw)vK6zo=n?F9osU~=k{mJ*P5)NP z7=e4QE@<)5gYt0x`;V&2b}BsSg95*7?I*9?PPLMFN;Xo{$2l{vPtv&bmE0n_p5u0% z_@0Nx<+wKfb}TIYuvp}A1)~_?0@3cHX&CBP)TVmzfa*Sij0H?hsuc@dK6mY4(Nrv2 z5n#@5_kr>@Tu|TEu-iDjK^>jI*;9vDry@H5-V4r5P(@{?)9rC_-d4QpxlijV7r67a z)#3uSQy~lfHIBq}<`5MN~xdfaSAQ-0F7hQiNa9(tzmUz}<$0?I= z4ZpH``KL!$Ds&u#nQ74e`v1fFKTK_?utbOcK?l8RFXJTz%&n{dQ*`_!I= z(6u*K(WGnTDnt=s{JxJI#k=%=p!Rqit4vaze3zll405>ZKuc8B`_95?=F0VYWm@Nm z-rlmubakK)>uFk*_NE|{P`)Y5{Q+oZ7$@!9&CWa+uomejoqg^Kj?O1JLk4K<_y}cH%A5i zzLV6de6&n1)R;gE59P`~)%cYl?go??)#HHW7TL%=j_?#XnG2QPZHU|*J&Malcwh5w zZAO$Uy~%Q;{Qd}pUF*4b_5RyaD9z)xC;9;jkf`$Q{H52U@|e>0%^Zh$#`-k`A*q*Ysm*zEQEQUjoH?qx7p+b>P>k@`f#yj0xOla>v&F_NRD9j zH|4JeYP+Ov%rQ&VlbJ|U*`=65^Eq0-oja9G8x5PYyAf2C{T095(>*C3hrE2*3Qi2$ zAkSE}(cB_P10~-<8%QhQ^o4f7A|eUJ^@}TI_)a;!4`&=TIeIrP2ZmUBlLyPg;%W)d zo^R!QtjxW88PT~sBt9m-+ci;CGhJr9cf%8J_dSCSQ}(ZR-pzNsoKX8s1KP?zc*<04 zF>t5i*Y4?RFOPwxLJiuRBb<}(9Pz}e7EM8v$$Jx#ki60vLwnt;cp9VeZG>{dCQ3pa z{fKv2his?M7B$oqns1PdnvwcYFW3L`Y9j!xwba-n>c5iN?3|S7tuqN}D6XfQCT9tI zH^YJ^hUMKhJU5vtIsl4I6XBBzI^0`*25K$`1Pq(=NS4y=aWp^C%366ncmHehWgI)t zi?L&Hu0>Mkt8gxuh-@6sjFU5lF?CCDXte>HUe?LSq!RIPDmTY_Xj6iAWsdtHPU}Fj z)2w5l7fSl8*MFG#z3lS?r^xtsq<>`X|4z_+@i3RmEZBUy;wDDpqhj|3|CqigVJ2g8 
zdSC2Afm8Lz^=+ISpu!@j-s@v2c&H!#cn;+L19S0^TX6sgVXZ~W991`Y2U2AA+1RV15NJ{V~t!Qw8oYLlU9 zeN8#f&OLL+1a?_=2jze7%5EEdXIT*`hOCbx9sOH3W+}Nl+=?FD~4egF#3v;ky=joGWh{Zdr7CO#erK&m$CE741>y=`BJ35z#q>2cgU8LTgtlbj^gL95%$L^>TM!7&bVzv{FF>P-Cj$`vf7H|M#dP* zIkRHQ@P_ctl$-{RxZwZC)?3C!-F@A|f+!#e($YvbA|W}1bV_%33@F`5iF9`iAYDTv zIf!(34c#zwGcfRQ-SK%Y|M&0ZcV5mpXUE!WuO$!=Vr_pg;GprLY%AT4RQj$?!|9vo z>%FDxZ|zvt11-@VzSsa8N?OS@5Ut_lQ6^3(mGO7bf*k!P4JN;^?^4oRuIC&kcAmcV zDXL=G+iS5H@I&Zb2eH0N_y`6zf8hmKu z6;k}d22VP--j)r|e+(>IcjdO$WD=S|A}i6xFjlt)-&m&z3CVTknd_t%v-20LjbI`R zlV>lNK|xPF)zvNrI5X{JM2+ft(X*wKAg5ky&hAK!)W}uC`%!&mlx}Wf%@j;#6G`d3 zJ$^MyDpsND1uzZ-nB!x)9*EWEdKps5Q#w&DSI->5j5$!XwGk1DSu&Lh^Jvx z_iohV?=md%+sXze;omv?f4+7w0#@#>V+2-Pdl@*VIN%8#PPl#f7bb%J72okhaYesN zkRaMkQJHOu8(75pyXd7f7G);|__P}e$gQaBA4>}+>TZ-Ft|d$}92r=d4fYS+F`(sa zp9hPXnj^)xzJG1ZWLZ;IDG@o~RconqV$KXV-pF5|xr3rxt}oT2lI!>e-UP ztZm75k#I*XJ3O|8$#io}Hh!2t9~V(4FO#$pSF?9TV}J=Yv>#vgHJKEijqZJ*%fA#~ ztf0I4x{=qkF$F%kU}0x{P-UVwVjs% zL`I&jce61%tQ>}oj}a?3i%%ZGGZp-36Bs&9H_w8}BKv<6!#c~}Zj{--qlA^> zjjB+lZ+ztX4c1s~C(cJ2v(Ih7KoA^8hOdxo?ORv`ZC zr~d)k_@vS9Tcknre5?s%Q`yFH6mmLrJoiqNfd%njN)Oa7Sk1zv9ni~|6?4*2h|oEG z@->l>UJ}*G(U0x(P332pcH8RpzBFU$Ntk2to&&ntLN6ZQ_gdVFYR>wKc(h_FUpi2o z9EO7MhtfEpj`wM~4rP1%rRY{8;l8mYmo!rGisSxS!*6YC4$Z?h@vxLCMIz>PaQ}o} zq7o35Rwx4(`Y)f4+v$k=-gKeL=O28{oVXKgq`=L$yXU{BR!OPSZnS1wqYETy#j5@7XS=;lwYvsuzf0XPn2M(b3ZHu4dti@8HD#fr z2+8V@7*fvuSA{+zVZOup<7xk6vivGi3@F1sHa&hkzAQh#Gmcgf(jpb&n{UNYdOy`} z%Q5BYlBTx|BaT@4*}Cx1#V}nFBlF-FXnlWm-uposu>}fZ*-a7(++A?6COlC@z-cr3 ztnZgNa&I^guOds+HJw{b1HN4E?BH3rMRI6HT<%<#QJjwI50CNvp{G=RJtTiglt1o! zevd)$g#rcRH88QxdE=<;C{v!D?(}ZMAC~2GNVnD>-j)&L8TnVRZd`W;2Mv&+zx7iG z9@So74kkr7A!WJEfCX-de#0&GwcrH|amqfs2xy99yI2MO!F5RcMnSUQL0OL`fi9cb zL%=eejH((lE{VL075w=o@vZsN>@W{jxzP?=s*x@><)=P=$ zH*-I3Cx$szGGM-x>4N`tx~nSl-Kxm?Y^ng(ZnJ855mtTzjKfTb{?GU)hiMd^Ellb$ zyz<-e`zN2AGAQN!MF`j1oq6oNY(YomSUbn+ZP&vL8N}*g%T{2^b?uK>_?!McFrbsr zt4+BLwIYYXl2WLee@VTw*X&R^ZbP^Z4_=e-9%wwA(R~H5q`S4-TbutuCwFK{vS+^LFl*pl9uoDM@#jKfi7;Hv; z|7yNg*Wbmua}qh~bLIVA<^9(eJ1g>2WH+tG6>ps?l0IV~&X+?u|uiz3_Wf&%7zb#VmjKe0Z05_0%daCRlg#X?S_MA(`S{7sM&V=xs;cb-gAP0g9&|# z64vDb+GvXI=Pr7%=episjo(T@cH16ux<8Vf#N~KaStJ+8iEzH}d2;L)cy-d~c(-{4 z0IA1i9ukqZtZ)40&M}jr)c-|j*Dc?U}F_fksBP}Ewx662EbG_r(@;Ff#aB4OjFS+N8L$P}SLi5jRzSIiBxB`kbU z0wX732eUu`cf?+FW!5zbTFj{eza83pJLbwK+|6fm{$j^Z5pD6I`^j0_;ij)YWQ6x* zIj>Y0Y0M$TvmOJ!AfggLNITF@S{W+j`!yMb zUF7FvUh2pK<3&O_T~+(h8LU8ksARavY%M6CzBsjbmVSj&%YJ7<3K2HAxk5q;7OxSn zmJCoqXF(C*%%%T;4`oJZvkD|_8u?w;ANrARr=CSfuO`hJLLYYHinxi=zww}M6*0)T ze8d^|@pvtEN(o4MQJLnT;}(*>X*2p2MpIjmfR9yr$br zupDyZC^A)_{FQosr5&go3!gNUpvhg}P_uuX-qnaivsNm6vM@yCxA4SKeSR^DTi&X=v$_a4a7Z2Tr|cL&d&H8mqOB8C~D_&ag? 
zd{;U!&EJFV`|(5v5$~c_kaD5+l|)Ex#?Bg*mU$|=kw*NMj{oVK;ra(wp+>HdZ#&I| zFOvp7QatrlV(SY>LeGzy&KnZsmS>mQ7BDrgrBJ*d@Q;>TVDDv*f2Iy{iM71ZG0?WH zZ=)pVyQf;b8~dQi+`Nz+kUQvP8L|?cbKP!9dHlfnW@#!lP7=}Xw8HA3AwcM|SjhZP z8nNw{M`KPjev|k3nLY|id6&GIchf@iW018_mS{O=kr1wX*A(OEWBaxcIR&4R zJw7+z=5V8fxmSVqP6t62?uavH=C1i zQ5sk4pKJ8KqiC_o2*c+$h}e0?s#Uo1*59-q>vSiaCi1InPLJkfXu^-rIBKVH*bns7 zJX@^qI?Sg@`n!No$bCNs6hq*iZ)*YP*IlKc5)=7VB>%y4xkH-iHlv&?E-K*Bg7EVB zyhzihK{vh8gyk5Oeu@~kVUDb`5J$=%Ea_!k^W&~jN>?2F6Dj33)8DsMFDd*dX^h10 zw4sDwJw6xRmko0D{cdD+?O*0XwLsnrX*+gavHRwlLZ@;WLVky7|L;&g81-!>RNn#3 z^5Q=>@7jmymdV`i8q7Uq9KnzDMK@KK1V^ z^l2F{DR6;Eqhw?IO^~^(Pa;(xWv`9M|!wh%d?e5xyLK{D-is-QjOtB zDE1?Jh=E?6Jp#`3Wt<`OC`U~&-hR8rDs+Qade^z9f{ohhF44iZM$<^R%ApcM@_D_M zo8FJvGe)CoFv_b;M4bqk);mJ>C9;x*xprMp`HLciUMwH0UN|aZJEF~0F*Eh!S1{+n z#Knym`@@Z(#$YjE*9fc@>W@gyz#6hK+Es0G}rA2bom3Q*Uoxlg!aJ3`wvr) zwYnFuH_;e0upH_H#YwR%OZ|we!(Hi>3Ks2b@!QwO86H zT#Q%;Oe|o*65YwAkxuK#RBD}THo4EYZAt;0cN>TJ!59|z^7wr0wuSyl<@w`grr&6k6Y~S zD6^Is!tZTZU1n_Kx$T|l7f2~}wa5cAq~({S0 z9*-BVN`k~)$Udr@N6OY`Y&+c6J%+cTQn&fx)XGeJZSmZKU^jF;Ef=%-?In?43i^qQ zCcjhJ=xw$beN)#S3>O_i_how9JE2t%)orh0Ztm53kTf0MWC8tn2Ke|9@5zZKfY1wf zZ)a2hWrJ@mP3Ovtx^RpmfiA4`+2$8FQ3BtJ8+O-tovge_?b$i=Jb)4e%GFis^=7go1?#SrVs^8Iy?nQa~?+ZgMy~u-qz+X{S z&%L}_@W*05;QbWpe~i5iOX*l6((pU!a)!o$e5<#WG&rG0QkNB00()M46_DYpP} z*%?(Z<*z=^+ngQw52enlNb!REv#pch%fL#+1&Nr^{mJv(z?vU2`v|B~WzTk&WE56OXTuws~=-ZcnsoJ>sTe&~)yF&OTwXH7H_A#01&yez{ zhF#-p32nUSUJc?X(e5zJhrl@31>1J6C6E)E-l@Z?L9FqxJRgIzPjkrUPF~F^To=hJ8<0 z`^IN=Tc4bQ#bkhkSq6|ieVUV0Igs`c2ZBVww4RiUb|cz?c#w z<-Gez*Di=$Wd+N;R6?)Il zpsPwsuP4uHx|L*XCr#8EGXeS&KFnANEf^5$>->59!u~-YaYr%6aPh6S;tSDCk~;T( zeUI_vwZ%)*Hr}o;&Y;I~baeK#awg18wU?cy?LaJFpC&wMLw`fHefMe~bn>mW!SAy+ z<8iv^nq@BPYvo`YRiY+{cP|I<_}u+fJ(u`CCyu+Ru%8@qvXAV@!ydPy>xF!eOXRNj zVgpRgcm)!5|wgP=WA3yf}V)C{L$G-};^UCr`(GmbZP`%LlT7M&nXXc(uW)c` zXD+!G9>w-+T^Sn94xyw>4fnk=gk9ENGBR9tH@*T1s6DJTow&b}n|G;j+f*8VGO0QZ z>z=N@V93v1;r_bRlWa4{4hVYYqHFZ9*biRCvl)0HLB*1i^p#QP6>2B<-v|5z~RLm-sCnutoIk|%oLIHuiR!}3JB%qz`h_K#2vyYL=iMG&_zPFym8{$ygvPXOYJ`+R&Tg>Op? 
zy2oxr9OB%|mr^^<1sZj`uOou00=G+?F~N5k@Z%T!YW#vkbDDYM?qJi>`3@ytv(!aZihxQd$Wx#tV}}bh<0CsjP8b_po*03%k(>L)i@_!+OV1#(Tdy%;WOf;e<$t~ z(tsxszNQ_2DUts(ecK@jXq{GGmf^VWKg2I_mZg{%c$PzPpJe=MR^2-Fi};~2YFvUW zFOCdtBIeau$!ls+X)(GwTc}Na_iyY1izOV+O+p9ue$S+{P+a($VWaT*I?Iz+@d;B# zDs(jFpD7##&1=YhD>vVW(y(*CM`aqmfsLJCm;6Mc+eEd(LVQw+Dwq)di*VX)Dsm7H zHY#uvo^MwslL{(tF^lpKlkINi1o86bWPBesz+QLsU3(3Xe5Ny)OO{b8kqG$On`#HP zhAOLvh;NQPEpCs;yq=mBBMAJ?@q`A=W1IyUI6K#xpG)dG)t!Fw(TKjIBxM`y+l8j= zNB==J*3Hr$PWyF4$G@FQJr&v|pqD%M3 zb(#B1kkBM&eaIE*3UML=PnmpM?B#;Wb~eYu#0SnOj7ZT6#hBPD=H4r6C9}UW_A2rd zHfKm=Ez0%(Okn>Tn|p!WI>Zp6u+Fka&X7ByTw_HSD1byePy1xmphaEgktOrNO~qxB z|M?2mldj8zEgz_3I4a1O(Hr^IWLK;NVHCx85)1saIy|dMHVozNepd8lK_dvhsNcw4 z!YHH_vGsG*9DrPfZSKL#RnT#qw0*l)_F>A(;ZuFQH4b2e|9mHiYY3cJjvA#o?76?2 z&MMd^HmaM;W^f^2aa$rtn5b?q_wfLV>gp2;!YXgD64`&;wsKr_xSc~~b6cl3p&a^V zEvX1uFwV#eP;M^|659gD?_z%98Trn+wc`rha>_S$Hp}7|vI#$#O5D2HM_e z&PIJ+GrA^(jl#*bSd;&}%4 zD2}@U?xn>Xv+dHMBSb_^k)&YjRn0B+-R*PI%MZDl3(3H|*NIff)nndsqmp5whbyF< z;pN#ZslR3Wwa0+m%d8aAv%Q2k909c>CAopTNofYtD_X;0G{h1 z|EOOud87_Zr+=Tl{EvSGwSyA?pZ+g4gAw?ROXCg$k@U>DrnINH@!oG7q_2XL5bT7z zI7B-xqat{@%FYz?Z!&v$G}fRDzz}IYz+G>*_^&qjyhV6 zKcheU?#BV;sU28l35K#1BfK$o^PUz zC&`8f-GxULj=!6-h*3U>Wj8iY2fhh%{dpD@>Zsu$#!W`{P)=tM1aDHy|IHe3~0DAgIvx5e|m@$2BR&qT08*m{a`zM0` zX7Kp_@&80{qBj9b`_|SM1J{REPh0T zE%6Trx+K=nw|>)m@~>vx_1~D}s`1jrD6-34uvBiZm3gA28$O(B5w;^leGOt6)t32@ zuZ2&76S+{cNeLsqSUPwuw~XjtU}|G(j1mdSZO?FNw|z|YujZ;}tQ9Y&@$*7-IdrRw zpT4^dpWL6r4=UTUnc*9dhBm%7y^?fweHyGmzB<(%YB$VKy5YJ*jh`{|lGVBd{Uo)K z@A#Ux^5Fdp_k55wA4znx{(SEs-K*c?zRq*yp+?PJ974^hkX~F;2o6v}Spt)qy(eDMUk+5P(^4@Q4m zan^TxV|oAgE%>jmOnHm9qp~D zJE1&nynL=xqyG+3PEqQSr8SVIdF}wz6;O<)Z_#rnJsiq6hKoFO5@~eTHo-Ci5q;$;>T{a1LI$5A077{?tC-Eg-+8!E z3NtxTv|l&dI;8y`5+G;=wmcSb*egTsOQFu*cW6}{5U*MCf@hVk#50}bB<4#WUbqB$ zAI}!QODX%s7#3`}YvBDcJ<+NYofs>-Pt@Bn1M3s({(&W_4D`y?czacZbJu~>&37|S z4B{*0uoH4TMn~XJCU3<#_`g~JLctW|w~jASacv@AgJ3L1_jGlI`h+OlhY!M9m#6N_ z76HaR{QrK@781s7LBAq{K%Dg}{-nP$4oq!FX5i!ki38aBSV9#UG&Y z(q-kV=ic;egNWyiEPZL^hpndk;l=#-BHs<3bT@vVwuW7Ohbiw@)Mj)lDH_lBJj(j^ z^W~orOY>U#Pa8D=Gqs!IMGb^_-eM7PKj~#mFwfl|3i{c2KPG+j)Ckn+uN$V-$t_7d z6@GA%A*=Yg;FJvJvzD)eq|?Ip2Y*3%+wLL(4L?CrSB1m4KTY=#(&clq;(`xs2HgJQ z7~#yCid-P-PiJ$HZPCE{jX#TYc+b;_?~;ye5FwH`I=8GkHn#)4IkuVSagH zJ*W*t69gp|%h60_2qh&tm+~kv8gKR6Si4mxe$zDLbe+{GE1627T=zRg^)9?!){CS( zl&F)<%A_GyJvk0uT5#|?`2vX`LcTI=Q1yO`m{!dz`l6_<>r>g>xQg&$tb82^{kD!+ ze`{jN>C!&uHQ1WP@6xkyGvQPYjlcZ8l)+pC=kD0cuYD_U1&!=4h(**Mn zg!3p!@(yu~sGBbGLq)sqr%`s`I2Ad3VLzkHfGyT4H)?dPah+KlVCkm>hSdAjx)ACsd8UJ?guD@6-$T?OO@}K_kW5#`PyfGCh*L+yq|A0QK>GNlsM1(gMjkDg= zz76jV9LRZCfAE;hCGV%VyiqeR+ztq)`S6CnvOw89T2J4WbV^IAq+K;NcUG%+1zv16 zbzigLHXoqqKcMp|$4WAx$n%^vEk9$&VNxv3YD9NzhdSH)sM+9a4HJo3r&0?Db|?DE z8%-E{YFWMopN;iPH7c7e=iz**FIvGu9LHmxMj0EQ5NY*0(&2Z6{iVMT-vGLn9*=~o z=NilwK-Ik~FGNke2|_`+Z%z&TL8qzrEy{UbJt`#aEju}pr@s^$4Z%}I;KATOk*}ssPV7LDwQ#^_MeR)|N{~2KmqSd4eNW{#Obz;qz1MmnE(^?_R*!7BY zRy-5pjn(Cr?@rZ;U_-x<{rsD^c3)dbe=s7LmB<7pz!?%<$r}*6A#NxFEDIr26!*Uw zGvmuX!@~Kc4CQ{DJN0_c>GxR zm9cjEKElJs)}00A(LA=ykv!<$ke>Gr88sqmDMuIvS`+4*HzGbde)PHc^Bebh4_9j8 zYn+2z*Hij^B^O)JB^T1R*UJJmK|BAKrq1Mgk-rx7HF zu7qF-Wm9HR*LYiH!*^#36AX6dq*=n^5KcgUv5%flU0d>!Z{EsO&EZ@ahL@?MuYT4S zH5C8Sm%vx-S*zgyGoRJdorcit)J@xG%~y8_Y=Ft4SRF@sRr!ob=(+_m7VOo#%5E@! 
zFco~<5R5TONq(Ak$MZ<7lbxX`MX44Gr?5mPzo>{%ul33*UJ-&ODKUl#!)%~O@F%Fj z%Hf78db!G&25#L_I|w?q!BR# z`zUe4u&{5^ty_ZMnnT(l6nJmHdBl~SZpQI}- z4#}+wl$TOG=Sr00#GtLL$UGa9m0f4&S2@_-23QTd%9XpDP1>KNWQL;m7()XhOE8O9 z{yYukG;&3I@ab-^C?-!rR z@Xtl?Os@$0D?t3ujP0Z7>Slybyojvky9IOxhF}4t&%E*Y`@!Er{`?kAnsx~aR#Vki z;?vNyp{u5H>qPQxMi8Ky(BhaF+IN#Y`Y(n@XJzc&Y&snww}&r`m$b{y0W7PQM~Nm<19%Vs*|Tm@c2h8C7iA$N5Bg)_+&66o$Sd6G zzPym^BZlm@>gnn&&$n68l%%JR3vSmHAh)OYZ-}?N;&wiY^@H{5EAu*cenKFAA@245hLiuJ?lZCds`3#x1 zDgjF-eV}=4kX<4EGduX)@i`x3J2Olwqa{IYLcRr9qQ%|nhN3MiE?hmgiw=6rJq^G1 z^>6np8T(bZ-x33;6+T^*|90ppHvdQiS`gt((ZOG}dx)1ANI$jbXIg*_qRQB_!WYxp zd2H&i8xA5at%x^IAVx<&8s`Yh4Ou+b+?_O;c#5Sh<$gi4tymIIncqT?AsGx zscWDi@4+7Cl~7g~7qb}95zDcR3)$<8q_EKuW?gg~5lidbT_|liZWVrYfPMV->cECn zk@0{Bb$+&QdhsSfmo`O&^P8?wp|K#U<(Oki!#3t zbeZT)(ch#3HC1oBBGzknh@av>R8rlZ8>9PndcA#lx_JPHgVR> zzE&7jM9$;jivUDo)E1uaI9bHSmq3q8Jo#~5)E9BOT@TY@QGsDyg|w-U(tQ2zoezr4 z>H)eZ_{VK48On)B1$J!LA}1c(98}jRC|V5IuiN+9{E$5_Q?&%y`0I7BY5BYnsWFSP z&rjd+_w}8Jk~%B!?PfSQT`12nr!oi+{B>a7!MiMpeB6@kP2tVUaY2E8Ll=pvH| zUSCC^YNTNUk`WD2OsH8++$kCg9HMd}a4~bD`^P7~J@+$K-H~1j!s>nb# zJeS~c*%tbOxTCkH-N`>YvN`1y_7(Us(8}vIF!m)BgtgLIRuKQ}DI&tC7&Z&fHPQaJ zd0$&7HZCbjz79(75vi-MzMDt7t!h~e&Nn4gYuHwnD3nhDg;m;se72LXEnXqc9F~qI z_pC4==ZDz6%)md@l$T$R{QR^eaCNa-wHw!Mvk^SQ`^_o?SnVf|fEVYKen)DS2H&BL zetF;u{Ff=s@BP5fb3ePzn1Flbqelx2$+&L*mn&iX=GQJ7*$RXHMfJ$d--Gr4`ug<- zjW2g2<@S;plyaY_=89{+=2~grADbCY7(T6(g+h2}K;2KT|HHv->a>6DS@^LeSFR(l z2f0~CN@LKA_3e*g*SVZP2iKeJu*&W5ppk0|Z#VCQra{Cd((CK`DY8Tz^ucfYo+tK% z4-K=xesh2-SAl*Z0r;>21&C_Cru%VB=+T4%Xd0K#0^@L#*M{H0p|jZBD4fR&QK{HW z)f0x}x-aN>To6|&fVQ^Bfl)6>+z zEm^{LZ(n(Q%!+Rw@3T$Xk62fEy(GQZZuz?$Y{eeJh8gVF^X>0r!mgl~G* zwSy%u$ZxCu(CA(U{hnzyxPw6W&(RaEMwrYUXaiilnAQJbrGw_s1K>5f*Va~Yj%m#V)9V_FHW-=2ZT3N{7N%xeN zma~0iZ~qDgIV(A&q4a^U{d4p#jKIU*!S}0nBtNzCxtrVRw{ZT(TKS%Os`5tB9b+`S z^?y%b{wMUOU<4$k3efusoN9kAr+tHvqfZN2l*1Z7h*V=L-A{bxXDvsI3+kYiAZ znIW1%kbPGFWM0UCHYblVTym+yoL6Nv2_54plD9D;J(GTJ-ara}G*bs7ku)0K!VMxky)d-~ zFD!ktoPl^Xh>t4x*MOnfM5q~=6?7L~cUY*5Sf=XW_DZ_&v7LeERuQTV=S#Go_Wd(V z!*d@~@CNBuQf$Q@-l89vg^xhD!%CMsh9&Mkw4h-lsCeztuC4u3E?x36W% zkVXxgaPLJ)TZd)l6n!>xZft@6c7%3G3^q8Vb%-1A67pN`YL|Rxz0z{hEyrd$S;Fju*Y8i2kp~WOzn4=^=9J}R z_|ehs*5g)BR0=5etH=$(=|&&PAjfy-m-0xvhfDrXd&p4iY!AaPAB=jNObTt8OR1tt) z&=vSP!&R?RH>U8=Drtkrx~N7;VE_|mv#D3@r%jYuF!cdaN&p{anu-_1%>{_Uk#B` z%NiITHs5MAz*4lHxD|0cQkuug={Seh7|+#4ULxyIg=(i-RQf9BWA8nQN??mP$7XFq zC0CDH98g6Ko`&R&?|IphZ8NmlKaA0}Y@-XS$gOCQr-B(7i$15(vES(@4ZorIZ9I4LDS2r^w&Di*V-p#R(OC0N~TUM z6wU12MNyiQKrx~FuVUBmIDBUO!3m}Z(+>N0z~B1M(QPO-a+bNh`%MwPf<(KyTibrRA-=`i+AJadIM}M{dAAxeYIN5qGKIZeWr`p{4LdIYucke2AQ71l z)?VsTrag&8%HD)u5J?sJln8DqP>t1=E+jx*Ef?NZZciV^Pf_s{gj1cI&0_%)vi=^y2znZ>L(vA1$vDy(6^&Kj&K-eD<}5Pvh(OprPNlvY{i=K zI;X~|hYf}eM*g)vKKSjQNf~CXklEj-{?7qpgoxq_%3{T>%1uc3=g1N6jEQ|$Xpv_? 
z-lg25{SiM_!CB&K1?y?o3>F5gwA%9S)ZO?|glJy5YMnBx#8j{IOex| zKO8ns$D%33n%umf0Gu4WzmTf$WGSPb^) zG_kDva38)DxV@eiqks4`M3|iUXT1=E#EY}`pjfZriL!7KOCmMMoBlnkq0uy82CH;$ zvl`_s9z`C+EGTZsMtyuojlv!ROeMq+p7}`*aXFjwVG&`J9%vAHxYFQhmxQP;*ASjU zq{EL?Kz}|M&)3hXjo>2>k<;1zR#Y|~HJ_aHSDb!CjQNyCer^5^O=8h%0Q=nX+I*u4 z(e<7>fhG6k6;Y-c5UvHayWA({%)BUSLMT`ZxL)n4(ARC(`(0Z&#K@1n1-z>~{OH&} z^WhXvqMbIf#?Iw{v{^2D95sE5N~P~EHiv!~3D`CUzLI1AJsVPf{^?Rf zk(WxKJM8^*Lx9OEo|GhWkh#m`aI}6zXj55E%epG*;bv`hh<>-e(=G#&X0pVS5?`HJ zF4-5EcMofAyhaf9oVK=oD7LVlYE?D<^e?YU?DHam&P5L<8G6@YBbaEf5_JE`9Lg6F zyBG;-(BiTYh#(Mf^NLm*gxHTev=baKlon`aeY!8Gy|8#DXwTh{Quaw8AKe+SzOs1*> zv+8_{r2so2S^?gg%*qW~7zwc33!&LXx}icV3!jZ#t5La&9p-2-9F~Io6-a;Vy>Zm0 z&c}t#txOOhdd}^^RjF*DvwmPFf|qCCEVK3}C~N0c-G;@aV-Bgm#V7dk-Hi%=iQ`7$ zN3h`SMe1%&vxt_iI1i?9Pz&ug@NO6iSjv}I@rOx|63j()6uiBHp{9kbuW*#V_8a|D zO<@2rH2Z(_L5NTSJRQG?(K~(T{Hq85huHK~dgb#7$m+}H54gH?l1jkoAUd?qn;FrK z`EijYe{Orx_%bAi8?7tx*7Nd*u+#R&AS~wvF>!Hlqe1Jqs4085l^r!>2mP^H;kmqZ`^4%N@{(#Ss)_g%90$n|c*& z^rwC0veU0_tZ2`Q^tq7#*)eySdmm+&@$9<23pQ*?QnUWsO=Tk((E5jtScFAkq5?j4 zFwFxPs$p$ye?QF7YkXe(3Bo;qZ~$BA^?30HV7@dPgxfMiTd;?kRmEUL(9mQF{JMF{ zz)WiqMyF3MvRCsd&n1*6x+$+nqhq%^o}3)&XWwF2S4h8A`@3O!Vyitd2lIw^Bdz{= z-2ce9u?r#7hN@=1oQUx8)exie7g*bTj+WQ7g8t5;aQFDb2tiWi?uS<~s>v1tpnDu$ zx;1%v?*2&;{R#|-WQn5IPn(OT1H3#>H0G@>?LT(HgSZ6`E;iXwa*xZ$A`9guE64#N zUb|t3NpSUCng81vJvO!dZCTaP*D^u{4cy!69H;lJY@ zY;I7210zzYXE_i3a8U@?N^)Gr93Q6~7&>tiqIWelqDrmu`*=h8YrbqNvw*}h6CX@7V^V=KL>I~yf> zaC|!WTV6+~gP{mZ*T$YjGcoSBUe(DL=tk}_H zMUHy03bs;xc4PDcF6KY3Qld2LGQL>@QoAJ48rOJI&PaZJ`>=56cx{(b^^JX@XvPSY zYQ(Ewb)$x3Nng+3vDOR6tnb8k`UacDd+*8`ntb=+&C+M(4z0ko!GLjQ=`vCj`Kjs- zTVtZO9}MubEIvuLUM+!zk0g)b^<;gn+p@k2pD%B);e{!$k}xF92P~O5e9z@vN^1i} zhs}o%biKsqk1z5kSX6XqjomZbHQ?-B?zZ=bRNyQ7#rm@3% zQ#xqxpuZb>wBvRxJiXi-|BaGj*iWfb1|lL>xMf;@SvJc{rslGiQ)_f#=Rw~HP7V_Z zE^nC<1RxM--t1$BPzU(#C6ow&;1w|_=N@&wte_MDpZtzc;g;L+e7Y(Jw+Jkf+r639 z^K&yn!hgSZzOpB=A3iIdjUp;kC?b8rt50!*YxQ!=i^y#ntm6t&6#OdEsIeS39Fsv7gPm#l*7I zIlq;oo3>pH*g+`)H>tqk!(p6XkEkNTaTz01*_n?DO4dDd+o|M98(o(Y;;=#IDd&1{ zQ?`Hkfh?0=UFB`f8qYK!nx_@FXe5BNqyc@poXW!xV&E>6tdMzgqu1zH%Do~*w{ric z2{`+U9GkNFScFV2nG5-T7L5fsV zk|`=wJgHLt=Jb2}|3}tWM#Z%?+a`DdK|*k8+}*u#2@Xkc_uv}b-7UB~39dmKf;8^l zSa55c#^G_^z4yJ7@BZ4qdX3RzuU$23)~s2muKOC({VHo$g?QP~%;>fQuo|2PbWhCB zH>SugmDQfP&jngT;flI*_hM|oLAZgn zfGWJvj8A-+cI8qQXY2l-F`ee0m~K3F82$Bs$8^Od7%{MxR8Xzh!Y_D;rC;w>)#KG{sx7EQHgv2k}G z%3?8W-Z>KIzMPO7awNhtJimW6{dDwmt1X7$?ITPIVv;=iyuL%!G`@O6F1hNqe|EyN zNMwd>qr}JRE$Mvo!{bf8l?TE3Yd^)pjb+QdOh! zWkA-ZisAcIxtzPidGxdR(b^9wowrl=9dAE=2KwOxJ3i!K+0FA6tnEnHWOmb?H9iqu zm*1Gwchg&_XJa9F!nlALyyH!Bt+Pu%XPY-3J967=I!?ceklSLFo^k%OC~!U7$7dNk zMEHv*>4PYi&nWx|@1%i1S?bEu2f>`zWnc0~L(-o0^Q3`s6PNRh>HQkA6go>hnN3#_ z<8Rr`V`H@G9m{f0DHosvAIEu44Up|8uK#qs;4=BkM)~nW(X(8ks%kEHn$%g#-^D+) z0G%#6-0${2m-1f!&*%DIMypYwUH30L{8$d%^=E-cFP1Ia(1=}wB~riR?S{`l@};Qi z_STMvgIOqbho*Z=Rq9OX=-TzIh+Ea++P9UjexgtezjdD%mGe^Jd=4Z=Yc7+qg+Y(DEuz2;BDr1ToZJ!A$ zWkXiU^)qIiCX@D;J0*wjE1RIVOtUnnl<9AockWl&T|kE=&CsbZy&VcIr{LCypOc&5 z6Q4Cdt~zHw352Hy8D)EA&`sT`;E3vIHVF0+H6O$5l?bpSWh!Mc!#^iTL7u>>Is;WX zCoSb?i%F~}3Hg#xCR6s$ANdkJ(#`Wi5*k*mk7+fKCcR09Prs~&NgJxy9Bp6ffB1+2 zMacXpvc%Ais|NPu=h<$w$$sTZbup-()K>SG=;LsmG;Tr{qAy4Q0fD_-HU~@_e$#N| zYD4YSZ@eX`DjQ%Y!5F9dLV$XSQAQ~;aJ?pnVB+i#!1ajti4o4x7?kZkq z5EHqM-o4rulQ0;D^ALeV9&QY+pQ1Li|AxmX7Hk&k(E++E1$x`vD+8}RWq?Ug_esG+>$4(i!BN(6TvfIq? 
zo!L8TJ>I7QK4f_hNP!Q`h99QDS)a?!Uqy%@8#)+_qi5-TJI&SfFqV2g7PQNEr5`#u zuuj!CUl&R%WkRSkzi30E08u{P+si5ZhuFD*{Ga-6Fk0mC_b>!L4hRyy>AiqRwCetR zo=6(E#{k|2V)Q?Jeiz~=<0Fo2?t8llSoJNg8dvq(=pnk=1-?k~dgehV)eKo@|2Ybw z`w>Cbp0Bvqui;x2<(s(WY~CM|pO<|9*i)OyWL>I-Ex8S(mKh9HjiBZaq%^j>Gv0pU zNlcCvPGnX3`cr!THf@R}Lh1xR#gW?vwXJunlW}cGul@+k(KZ01y=zF;xCFqjRzG91 zZC{~y-@oq6VBp-mJw-69iic_E!-!QRJ~QLUre|bq77++r@L;P}D{2Zqr?Vwflfma` zqfO5CWepeHTid4NibR_oPP{COdy7s$q#S?w9z<~5pPTVgGES4XkG4Gunl~=x+(CcH zSfWg zeeRrsEo{2W#YEGczF?l3ljD!h)I#M#j{80(bygSJ3q2gY7@6c)(`i)cE*^JYG(mHp zS93j-5g>utVF}ma4E7luHUhTxbu+X}$UdXFr8SrL4wS9;V%Dy@OJ6N7yE8RC~IQlk9x&)qx*@T#@!CJc%va+Uy{w`&ob2RDI^ z?~tZT}l@#X@JU*$hK}yYzl0-ge#6z`*D%96U z^3{8549Yg3vs-0Ss?tT2<&z)|EucOx64oW13%d2N>Eu_IXpj~7HcamhH>7zSL|Zu@ zS2_RAWU9vmqEVQl>#k|5W8dUEVeq6uJPCGr7*2dxOqB&p(w41W4fCcyy4k1c*CrSD z(xP(FWxZu**Q2$u`l36DQ4I+oN0Q|MTvf_Ucnbs`Y@&;E`A*W8DoNr!xe*C(s^5wI z+M*6h=O|*3K657gn7G`gyram0UJ@` z*G)G>M)^cU9o_<->lZ{jj(We;=bYxTEKSKBUm%_A*9&5lftWhGbm0vv_*< zZnnv~&`doFKYIYAzo!1>hXTEmF9%sh3Vmy(a7#{pah63APEnu;yHh-M?Xi~*M^p)N zkSW_A>cHl0fWf3p-c_7$b!Un4!kk!vWQ#2*UwN%XAB4>!e>)eKJFOCAQPO$}VtUoS z$fqSKn$Y)2O<&W#g#GgGyyqPILNa>9(Vl{pK{Y=2$jUa?&+BJU$eN$`{RIFBt(NnQ z>$z)LzBq|E0m8DX?VlFoQYta_{VX5uEbNz#Z#QV0ygE-Kpp3cCH*SR@>E{32_EeV$ z?%vD$yKurqa3Gl|NcHn_v)$v@*(Q_tt?+pGPq)!FLMyA~;L%cW%gTw*(^V4D@qQu034Izo-{gK*umBU=Q0Y+Nv55 zfxdwHMrJ+tgC0YZL2;xO$@$zstjBa`pS@%dn5?5jnWI!V~ z#WIc4jx<_0FE}rAI z{xJs?Ri6HF&v?pl4jz$<1jE_!GEfB`J8XW@20UV@!)}#N-XRiTZ?)|6KZAhYT6# zzSQQRskrhq8BUD9aJoh_(C?rany{tTs)r7$43XBx_(S8Za+P@Cgt2bepyxbl!h_-0B-hmx`%x8 z{(65SQ)NFl^S?;XQyP@Lj*cX4FS~MVy)jH`$A&cgL5MGH@=yr`KT($!J`1bHeJklW z($k3{n?=@%ks$b^*mqFuyW6=Jo@p;vHg?d%SPZWFV^#~nQXQoh?SwW`Zt+@e1N6T0 zdKwCNu+_zGw%XCX*{|vT4WjvR&rPXiqxH&c|Dfb~IQO=(v$T?KBo{r; z(hla4lu>%1xU*)nkA?0$^n6*mQ}9VqcjW}1ycW)DHG!KouvI?qYIc~mNq40aR&0I0 zL_PBTTj=}TXqD{PVjoxFq@XDHRR=2bW>BZErMg6nNMbG7?Co$ORet8kpbpY^Y1cu>z zwJB3<^^At_v}BvRXm_&^)@+e8+Xh}@6>JiIb3{d{1!-k12CsWwYt zh)pP0R#ZI0na}tAl|(bAnZr3ZdvWN%++)wfwT+!Qi`sM*`|-MA^iohM`cuNYt>+|G-yihzpBUf1 zl=XcY)`W};NJLJ))B<^4n%Xj&xHdRC~EOYWK+y>D5h>K(ibwic8HV(kwh z3f_Icoy>&?RXCm+c72gB4Moy-KeJyeSYj=kpMTxR93#Z~$5{Wxm)8G)a0rJ)p#6SK z=juv+jXtYeZ9Ba35A#S*<-0F+m4?~P6XWKoE=hYI1(oz?HyU!slafaG@3{Li} zNErMbV(7>LJ6EaA+z$b%j;s~G8_?q!4D|dnYIdU(%Cx_kAc%Dk7d-r@dHT&LRDWjh!LLe!{dpJUYkI$OBGM{+ z^_%e|Fg3~|QTgK|JPE_b-O2NyfnD?RY4^)TbGv|D`#o|({4V>H4}dshK*g6i{PFO= zvD4}sI63q)0g(Jt)e+ulxVMxk zyVLJ$vE%q=0H{Ya@)P5NycWXem}Wd-z~3Qi_!hEB4G8ZDt}wk z>oK(>HrL^fF}Ld#dw)k#`mZd^VqzmnL|@HA{RlHxOd${9cqq zmy$0^5GjKs48l_*Il~AvUdsKtMeGTYlI9w>#eZK@QZC%k%jikW?1dU-))tT;P>hs( zCbP)iP%TUUN@k)*jGdi?j#=Q!E8HJ%*fG|6Ls(hy8qFf#qnpbI;=3C*?g71-==I`@ zzSz0T5j>d?O9%d}L0$9V^?E)sdH-AkvZm-g_Hf63gooTW@(50SfCkzpscalEU}wRgY1JO=t2v@3smEDH@Z#;D?anIW;b_Pnk)i$wmL#! zR-$XalSzI$?C`nArzgqu0#g#=xw!px)@UPhZHda3(`5EI}b$(h@d$TXp5K_DEkpHX{?MOyq zu)&>b)_uBYm-83wV}DOaoJu_8c9YDIe16}{VvXjt`T72bQDsoQgq;Ok&^1}O*Kzwh zQP+TPCU&yc<2BD-&l6+=DdWB)oR=2+@-m@!*P+){JGHsYtHtjZF<|?zw(l}Tpl&}? 
zMG{gfKVu7$V?xAmJEPu(Qca)`k!V*f zr?%S$pQAK51zE0%36fz^4l+7%K6&QSyzn}CzV^5~;kOvyh3u}*_?!jn_hVy7$wxJPk1rx7?yeHQ;-=cB>tr?;e5A7hN~8d%$sf$>XM1b+yb_k~cL4 zcz>OwzzbJ#ZR!}#C}k$wm!Eef&au<;j#U+rfp4bU(>qmnz^j%A4cOO^qV5#Iyd$E z*yfeQ9rMn$RxkVehfrpC^mGOP&fpuwD!o_E3BgeJXT#{2?MIV}me*yuw|J?r?<%Cm z6uJB*h$nh-{tCi8=@jAVgm-*7@yYR*$ zG64Nd#xYT(4!BT)7V@hzKJ|Az1|;=0>BJtNagqqren@$=rGM@!O&nK5JGlfL7HV1de9==Kxr0u2=cv4miga z;BogIyGDh9HmGYlv#I%cw;HtI`=`6~Wm>EM=Tfs@-#e%+qtemiIW4x#KJH)F=mXqE zgny)j*4hB;`K=T%lR@p_cdnm;LW=yEaZO6Gv@34fwD}Ln2X5xFgb*{{nXtPL)MnOY z%r&yy%kMGa_H1Sy7ks6^l1dV{WcQ~b)?9w{5a5h4hH+oU3AaU<|3HMxbw8eU%iC4k zONYmI4T?(IfJ*Q`OlGo>N#SPNrL80V0i5#lEk9w&>)zEe|F|L|EqklC_*qHjmKKX) zLp1`Mdsl$w{RB(6BA%eQXZa*A_jkj?hx*ip*?GMwMdESMgmrE;f^XLeUQ?*afF)?7 zMNA8sNtLS=w24fCUP8pijFqS%LR#*=nItBNhGnqU$!ej&Mm3j%gmHwc$;vUkw}Ak` zHQH z3zy@ApY|>Ac=eO-IxSKtL41shOXSKhUt5#^hrzz#f$ zqtdt(&%>lccpLEFk8<%myJY@Y7rU*W{->Atoo~aH`O0}fC`aIDGLMr|-aY3-9K7|= zbRr1vnyX%$gO=^#?WlqW>;FIDUH_HmtUS zJ+c;n3T7`LyyuBLy~WsGeO-olHEX@t51pEk+V1r}+U>h%BTgtAE~+-`zKM>(w?iE0 za0!w z!2_XMf2rwOwCywtTQCd{4cCG%-bjmQ7KF(o7ct%d9O94}79f=kc$ z-KCHd5tuim9{4Qgh!#oxq8_-}=!Zfro*p=%dMD4~_hZegGoAOgyp8fHxnBJbsfGVO z?d2L8ieNfmm8ZskJ_v7(?}=ztNeMfx1TreYD@&Cd9biSmGv!PW3Pf6GjC9Kng!q1g zyLGP=Ppmt_{DR!UR3b`JgNpr1`(VhA-)0};!{}{5byZy>BoQ(X@EwrQH`|;fg{dFVeMckPzdbg{ z`NZyzA)bVr6|J32sXiF-_MwGdZU6TEW0b0PaxYVDRy%H}(Ts_?M+DcsHA+y>hcww~ zJjD;V#u6Iv66^F^X@bSswYrm6SWfmjtYtG&&iSetp^LhzCE~c(kCP+!Anx*A+3L*m z=H9fj_%mEwR$9Mt7qNd)^cn5pDu8+Z-~_XAMo;mVY*z%RGtwx={z2$h9OJJ z58bqo`R4}-DCot>a)rwILj~?z>)|RN=-TFYg%kJrAgBf4Mt4)){|C;0WF7UeSDnok zjuDl932|@F(Oim~oG}Yn?xX|*dKGX%FSe4@4ngst2y>g^AkVOJx%y+rKi$LTDbn!Kmk5ISpQa6 zf%7n~7UK5^U`#;3xk)$*@{)L4AeALlLxo+JZlMD>510&Qy(hgdC0HsUpN7(9O92z# zi0w~wKKZjkqH}>+LF>Q9zS@Isdkr9&QU-nHv-oghS7M@<>DHUtN2zH2`J!C0WR1be z)4@*o_`oG1?|3_)QIwi4t&3%$pE&XcQ`f=&cmZTv9oU9(e4rMirhdQ(O5Gg`^=k=Mk!L_94&Ud2b0;d9AzVdz#(cFwCmk zgrcEF{x{nF04F2<>RGcJsChR{ddUnN?qLD6i1satUf?}Zdfi>4qhnt9orYB$se~#V zI?>c^@jDZhXOfYV7Kq0FA^tALjwXFeblHvPs=HqB`lIP2WUlC=`~z|(P0%p{28H#3 zGy%nI&T>r)bg9k21R%{z9FNI|E|e53;*MN98R`d+QIdRBA0$F3ZI{S>{9Ot&c~c>u zq{rV`k@UJ?qy{fnMxkAcy$JGY=(Q`IwqZ(k!ZLN7TO22c`IzPv29NYkDy`XFAj{yBgaP&4QX zHnCP?a89E6VSs`n>6XU(F5MKJFD}O@G!2QKQ6#;x!|%}zt6^IDHyP}--M+ZBr;OWWrc%k6UU;wdX$D}yG_c@-T2 z*XZ6}P_?64XrXK<0bE;At8J^F?6InAgv;;^~W)|xq$e@PJRlG zOaR^DseT~UWKl(3se0Mt3|Uq2muBz&=vZXXYKm6XU52b75bhctQ!vv!dO2=+YlI!c zi<&3LI=6(_dY1f7aY`cGleN96u;Z6m>Cwo!(P(`&0!sp~p~*u`q>;&&8wrJUR#=*R zic>_ThqP}EZHO4vbQgjf?MjLb$FP$)YvLbuM9K5Hc+|sq(X;N9M@U23Rzz-7pbX)R z@19WVeg6_(c`W_bI9@ZpTvr-ZV-c7;Mkv9-O=*jIyDa!j(||rmB$v?4?!@EWnFrI9 z1Lop9@=u7Lr%|=zfL21H*u4Jv8VP=Ml}@X^zdie2?(7)wI?HX?)xPEqjTqqj#t-AY z17BI3R2-Pozh^R~O!!T}GH1(kY>9gN-%z42Ms$TP=mFaQ{KM-SIp?dvvol81TCXC` z{U*R6O0W^<5&yM_(asAhq0le93V;d539q4liUps}{QB#j7-CU$Eg^wMzQwg7BQe2U z-F>e9BGoMyLXq-xmXSv+Tv@w%=Zaf>b}S6!LXqzMp%c-#%vydm_W$-gh-&BkJ3D)f*#&J1*H3 zBgDuAQ=SK+Q)~N<-JjT z9xf9S6Tw}tL=c&I5*cNAU=HMA;Jub5vd>8^r0QL0&jI%wK=NlPBTiH2CG_FBkcM~p z#yrv*xA~qa-=0}fn3B-Y+&<>m)e%?ez>W_6kF2tJM0Kt=8s+8Bin6t=FwP=4zV`Zu zW6G*a2T_ffp7>a}@QsZt1gb`{{43KKN@J#F`(Y82=lj9Rub@P}Px&yv$636krj@IP z1j;Th?yc0OV>{rLOzzj`^%a0kT*dZtS;d!sft!DL8YU$KO~c5<8hKJCku#YPjB723 z8u>?@S5wp>t>IDB66F3DV!36%FsV68tBO@vMRrONAvDT}%;CeuGoc&bmp{_nGA&P= z9!z#-YXSH9>%ID3%tP{xLTJq1l>b7$qa3#>>Q1+d69GIwVJY>}IZ(JO`CCPhZzZp# zUDsa5C#fX0a#7=3k*At>Z^t*uPVfmiqZxzJUvtzAkpe!p@X5O#KvLWj<$R6-YZYQZ zs~{8pu7swSPwB{Q{wnpW+eTRQD38Zh>)n!Mi2+xC#g$2pN$;Ou3w;@6AAP=tn5QP_ zO%zk+PwXzWD!$D_d0dPz3-E&cghT zx?IM^P~p!BJ7qc2W#~W7yCFRUe-2-WqAIc>8f@@$W;ZvS#U+DFz;j)~3QuU+D0xJ;3-1q-G288uy2^f z;-Pn9622$!zRnq(T>lMHpgFH}K~?hj%Q5uu2mkj1Bc`zkBoxnNxLm5v+pD%zJV 
z+TgAiA=+Fg6zEg&c=F52{9>$w*0eDB|CP@F9er*kx-l15XrBE${ZH*nAdJ;LHC&U@^2#f7r|*mpE1 zYGs_=-}u*$lq)k~mMDpdirb&(p~ufQm)GI%wok-rlBYh?bUwPDxtvm*`uc10B{mh!~O0z7;bi_9Jm2#%(~z z#9@2FgJFbZ?#boiW1Ne+fq4&-*{m!v???=IUF`0oQYhDrdIej8d~!>I;wI**r1DIU zh}Dg(xWv7NELma|6X!_6isJxxjtoLFHVtyRacPN-d7xN?q`)va-tYY9fK<0epUuNB znm>*QP?-9zPI8(n!lwh%;HO)5!<9xxWG<73u~W1^_BrlJh^3U#rE-u-c+V16?{N(Q za-elgB+*4CKKd4>uRN5!d_NmZ^x?^Tt^2?fw@-`b@TJXqwlQr)+6$#xZ^>(70VOzR z{FQ9i2oi3Xp4o3vT|RRlY0{U!v4SB37GH~f1THz1cjZ*I@9945+tXtYw3~GVmRcOe z=2I%PwbpN))_I0U`ql4Y(6w~gjezN2QRg3yMTr+q>yz3oyi~yh3lLR_K{!CEGg9HD z+Jz*@VMtui_X2PB5ot%MCa?4gB8>x~d$J^mU8K2my)HToU(rL@hL?Ef*~_Luvr;`` zS8tW_Tr3}&(i1d?+_vFEewFI4F6l#RX{F4o{4N(2hv;inwfWW;RxDoyieaE5ga}g2 zsv9gCGZe`XnPi%t(iWZeGsTuG{kSTZb)JG(WIpnWE=-=F)$4#J_DTE1LZfPp;rQ@wd9^y$nJ53)#>M<>sFe{D#C!dGN0dtG=6XQg&vk1 zhCVx%Z4v8AP5v+Ol+6Y?saRLxZo@ay_N})ce~d|b-B7o?(X$cZqLB&MNq=1*U6O5M zV*8z@`KJ2F;$57!{r_j^Kfu)obt$aSU2NSkLo@wuCH`*-3yJXiq``;~-o*&HZUEeO zC^wxvUCAs72w9bB|NFNuY;6rbrh`$Y!7=#yw#>O^FIekCjwb#&hS`V_7XMr6JuPI1 zl}WByO4}*7A~U_Eyui1LITN3`Fla8Vh)1{aqS@I7F9&aAp47t_CTcsq4wU;(xR>V0 z|VZ}g(OSExJ}#G4MG+JJCPOk9waz2 zNB(x9_u#`6lEDG|QI}^uA*)_w6XT>7V`cTX32sh89Fin{U8W{7)bA#D)9&&nzu>OW zOK3NS1i#`8Y`^ju)Qzak&Y)xp*mhy4uN)!^L-;Ec?yU>4IiC>1-y2Q_(!la2MZ0cZ==T0A&(_iQ{ z9r<(V{9-IyckaVDsV5fikmVzSKePYdZb7+gmL%D=IxJ~952RkH1%?d!?je3H6}`$qf%W1C##faE7+opbTN933d)5+x%v zGyny-H7lp&%eEbC^0e&RCk4HePl@v62-B>!nFdkxi*06E9y?E;g@GsUT5utOY};Nc z931#t8Mk64v?IUMPccZr`Sjflskcek14?@sVi1rGK|LCHFT~lUA+sqPq^gT0-_J-S zXW|kKI&^-_LL{l3&^Q=C)|$Bn9F{syT$C(yi zI?3vXWj5}0BkZoLAtF`<=G9SMwv5$QcuYX$-6V{Dom~YmDVNw7{MaW>N~R144M*Vxl!H{wds6hEg3rU0 zo{=M~j}MG?jpFMAC^joEOLq58L{@ztu1W2QU*g5z7y_3D?oI$;unyL^VfbJa?RQ>N z@|~B?Aq$c1x^Uw?uCxzs*{y^;%ZFbB;~uQ>31B3k3pHRWfpdR40}drKv3ZscNstj^ z;();_na3gYwj70$_zN9I)W^ zNOWp9m3bDbGp%QlVAnj9j(!A>@T`>Uir-1C*VuDwrGz{72$sfSf83LDq>}1Zp2E*F z*g%|V@f_)|?GBwPf7$#yFAZv;sm`oTPdQvaXU0Ag6BYWiaZ@%7Xx!s?65~I@)bLdU z2tyN6YU%v`D7CD++wfkMO99xAZwOupw#?>G+3Fx6TjT0?S$A>+bjv0%L#hAva0;_miX$F(4BQDc- zJHT$EL(xLwT5_ zoi{lVTAr)$am^3@VFK4Jba<*M|2;ccZrV$BS(QPqn+JT0hr`8`En$F_2LhjbH?lMcMH>WF-2 zslijQ8)mcVz zq2Eor{yd_)5mHSUleEXi(U!zC?>0>)HAv~BUXOn}sE@0{Gd}xBG<`u8odljFUiZ z4bSETdD>q8&i?ArU7uFz>i_A@R8)4Nmm0j z1-+^bwCiCw>#xXy5xYb=a|mIMziq`%g9Vcp)MbiBK-bxO#wtuU>hC4S1~vwS!}4Hn zknV!u_y2$yUL4Kr_*J6`6$Hua09^eg9VH?0&cUF8%Vs&PyBLo#L9+0|NAzDbVZQm zv0hh}Z6tQCTd)Un`8EO>e791cDMHXJwG5reGd+W1QB_LB>hNl0^V4owEaicFm&_-T ztd~=u7yEqM5+Kedd$VS59t(If&g{3y3he3|5DVZ#hMx%VF^AbDdmJsMJ_4^oQ0oIz z@Uf=wf3n!pSQ7egqdD$8Sg*r-oSnTxeO;IQQVt`9oqptmrJ~*LAiU@LeJh&-@#w9wU$)!a(7^58gLd|kUrR3DRv(jU_Ajg*L`i{i*AKt0 zJXX=A?=^SX_7j}4*Sbe9bdP*1@e4++MXGrp1-kiUV3wQ55R2W$7o`GNKazIHfy1sB z)9-Sh03UT^mG2sGf94L0FT?qF=YmZTMPW9EL%F@C#b=t;r_ARv-E(&8$mS1c{S@$i zCKLvI8j@LB(m0S$;|y~5p#LkoxCRFZP>^X-UEYQKUSc;8XzfrV=5jC43=S_ljV7Daa>86+Cy z;G2GMv!knYh}?T^rN?gX7>wFRA zW1GFrylK654K-;hay)X5DJ;5w0Al&1`P+lE<0Nu>+Fz?Y$v}dYE0Tz}>G4(xq{b>w z_`^+&He#L6Z(F=(8^yAoj_a$TUk@E)%doJ07wYgti>OsL#*U2IH^H0fh();i{|fD0 zXk={MyhAU&Qhm#{I{HAV;{waM40{LU8>)U^IM8wy_ zQOc-7#KWQnm>m2$7y|cDpDM!2YQC5B_hBC9yO{b6I!0qWo022?;N}es{BFEuar)_x zLF(6P-!EgGKaOIVSSQzPbFhH1oj8VNJ5`+_;9y3EV3n+B%(wNa1Civg8&-~|Z{-EM zy;A>@alDDm+8%m!%zGM4R~uRDv}o1KA`)CY4faf6tb@1wowc9mW3CWm!e;ZHOQ9E; zxvoH@Z#HavOJ1!ZLGkM!<*GiB9%bJCS0slTRlk;v8#~V)#>Hfaa0hzHAPJSxR$k*_8P1ykU(U@bMrk-vP0TQ zw*Ic>Shb+rwO%h4{Hym|@jKbPoR>}PnhBu8g4mjDDug*f{=IHmc#NnmZwQ#jTF4&7 z(%MXEfl+lbD#CHfqzZvHCIRrn@43~z5p8ic_RVXMZv;O4c(6-R`P#cI#&;>4G3TGw zcd4#6w-m1rx16kvfjm`N+c6)r%wmFDkv}g z8h2cxelTjiFBRrKRa5fhb9o$bK2d)o9AcRowgB3i!q772^hNuk=jhn>X9{&dgnQK7 zEXo%@+>o!3fb+^)=<&$7O7Rg5?ldh_371S~JQ;daj(_$v5niLq$@FlVI7xPIe 
z_*sG5j(g19Zzbh=H@wGu~CpVRpclu34H zrKN*FS<0QL7pp^oz2?&wl8#r*H%hj}(SILLi9iVLeq?1747*3~OYhg4oB zDYCjZns6&}nuFo3Wt7V)t)skCV!03gMu;&8Q!ZcBe!n}JAA@|H&Y?N|ZG=vwqMP7Z zZl_o)<3rM;B_o05PKePPPq5dX7_-m&5TJ=6rbuW2F@FF$k>%7cuKZc^d%ZDUDt(Vi zypzr|p&ueF!#BIb;y}-r0gq(`5vp&9u+mp%PyCQ)>dAs2U!I9)zVod$pgwRd)gS+0 z8sxKeBtmm5kI}Qh-|`WJp{xUi6{~h+I7e(Y;&#`zE/L99d(01h%0-T(7>ogs(U z;8tyuOlfbnF~c`B>sxlm!TmCl)r5nG#rg5)c=GvJ()@bzc599oO&J48rZsB~_=)y& z_)!^2(J+f@-}E16H*=E(TK~F6V3d3&T=>su^TXC?lw+ggU3#*3zdr*^YPn=(b@Lor zh4E|u95+nfv8=U?HL}*^uhQKX#Ukd}HkLUfUnQn?w=GSl*3WoSeTnI{5ActSTQ8Nk z8gD>Hh_;KI@J3e8R`lX`XrNKVHy{q z8D0>dg3V&nQygSIwieLP=e^^!N7XUH6Ia9cZKC};q--DH8C{-SN;TIeZioX(2kcp+ z9;2)jqcl=Yn~s?OvGQq^^H8(fB8G1*^R1PdezQ`_I23&pbDhrkO(~g5;=v~X{COkR zn>aURe2lI?_qkc5MCGU$wz8PVy-b1+Xy8$Id{h{aQIi;=aA+?x4zf1#|1IcN1i_K) zvj{@m@*^XUE)bE5pCI+;WY4xCu=}e)9r8+%u2s<4r7aR zlda|?!cH?4Jz_!TM6KnyZ};i-!n3n?VdTFZi|zR*#BoMQz=1;0YtQM%jdl2Cjqhnn z#&LDj(#N2Hhx<$ZVdZ}v9EvD@yMoEhc2yS)l7Wy0_D#`$&{bpRnSi%hze(tI6aLbe zZ<8b=JalqwgE0BEK|`R z%xzaR+x=NMe7R^u>W89+{M|xi3?rR@1az|6D%-X9n6&ClpyZFbCxu}*X&AItBmEvulh2b zQNIsUKdJj}AjFCF(4vnQVtn{1GBdvAK%H(e%~S3x{Y|BNNz z%Z6=ov4i0s0|U}c~;#p=BR6kNZjM=Ce?jG9g+D{~>41jgD6^fB!7= zX9k>1*?FK+`!&{0Q4(Hf2G`WR2{K{Fcf+asST@34B4VxnfFXuAx}`W{Y_VuRFVn5H z$&Lv6Ejw3;yDJPPgSMw?=Jj0c?S0(1_t!Z(LoRHUX39aTM+>u?`-Y}|DP~+&H+jji zh$&SMUse5$Va+*1K*VR4~<~U z=+~NO6thM(I=v+65B4%-|CtGPVWbMjM9C-&Q@Xj1)|1pHGHQS%d2B9M_ShS0B@^fl zoNb@=Y~UDle+BOsgj&jh>6jky0A^O}p3u&p+|-YJWwRM) zWY3v0JgKi5!5~BlBWWl|>$;NicIzD%;)*Mx_pIVwI!@A5#b?bq2&j&z+49{j#{9EU zNOg6@U1K>%IWU5?FMX(V=p?9+@>4I7@g`3Y|AtZ0Z;UgzG`qp5!rAENvqsy5`(Ykg zYq}2lM6P`9_u(~c7OLJl{qDqMhq16#?e|iG^0Qa&LacW21Nd)VXYC>zOZHm;_}|XD zV8Bjqo{KmXJ}>I(oo-}hZ&~;6)?=pU^>vF_Azs)et(H~0(zv-t9l#(t4tVT+uoRy0 z80vXjQ|2-DI{ek$q5CEb7u55JIK5p&0TDM0e8>Z|3QU)ZISi-eiKr+W*SJdh?tSv>;cdmmpgaBsG_>h));<0^tl@R|5d&J{x9W7SMO4b z@FP?33@i#`Rk51JEV9&yjTL@zU(DqwY!eD=(W|*`Wt{gYy{$rh;EbMr78b_nddr)Y z5DFp{q*wAfsnX;KFx>8D_JgIE0b!G6LCt7)&ZHfszAXAh#yVla9Nj*sdzrpE5@X5W zoiuMdygl6FDbT1kx{6hOB|&^KNp4;h8k{W6UQAP3Ku|ipkfI}K_C8Z+6giCoYORSs z$JSdyP=H@>dy%wFG!EB^1GCay2f4nkE|-8aLJm@H(^b3YpM=^HMLYPrcGu=Q#0 z58`_}82Q+`%&mc&WVut`BG6)^A9*N=7iUtmv$vD&uk(=4N(~#>u!_euru&s=4tf-v z=MpfCW%gaoqA2x7fT10f{}##N$EI-{Gaop7h3(jaJ>Mf@>S8w2b+MPO9};k)o_0HT zVqT|7CJYYJea}LcgjNOdq9QLp4mOR{QswPVm{-bY<7eFfX^o*$sVMjPF?Blor+Yq+ zQvNBGKb_Y^sQMUJMiZ3K1p)fc6Lr2R6Wv)J{#9=1LQsn7C3ns~id1kW=|y~zZ|V*~ zi=8>~XNFLDesA;D;suxg^oCX>U&z!kH0hvn=4BNHG0Um*>7jBVPKo`J?soXzkqf2c zGGY_3a#cdmR7DoFVbcH9|Hrk$Lk{wV zif!#!-_t}>)|LE~iTx}>w{M;C(T|bCPQhz_;0qhWNMVdOi}NG`{>{Vh^Oe4iOfEV@ z>Y=+Nu^%Fn+r8^*`YZ%>$TE7DLr!nBklrBR=aNnqd%1#g^B|+Z&vAI>D-pr_X?6>e z-|pH1c7MZukCejiO%#Nm88yES?cCq};$7?7`}U9I?jl#n|NF_%7Ec!Y*38+azr6Nd z>rimqYXqJSki7kMn0Jzsqr@;WQM*`q&v?J#Psa1o*eF@)ddD*0yyw>}1;0`BDyRP) zir$K7)eeAlg>zF^1`x@k1L3nR;@i}PO$->gmOUO^O6^h-CD$vnD4Ip6LW z&g`n`EOY5%p@WSRebqbb0aD)fmLcd-3ioh1tuD2X$70yFl_MqIl`E5G3OS9JQYl^r}$ZCsodcJ>3t2L{Q33$hf*N;YJ>TeCNhI0nQ|fE zRvO^4m_@|A73?pQE%4@Y3xr<&eu`;-L39B8*asVde+M+QJkz7NowoJdI|b~5`xaB8 zziq!dk=sEo$WcM5rud{+QcT;@Y;W2<*E!*cND&L&ZqsPoz{g=Rue}L3Wpz$t;{V_4 zNqxHE`)O zyl8#wHWmbR^HYz?{ALVj^EeE6+?>B?+Y-FY%Yb%z#i@_1pdsz9{ZORuTVym8Oxz3d z5Q=v)u_RHGLuK^9>!bsT>NzTj9+`?i&=9@GbK%#g7|<_SPS+%9voWnrX4S=Ebv^}> z|5V(i?8RP$uL1!~mA#TU`&+IN3vrPqe7+N{W@jYA@t$VRQ8K%;lHAfHlAFIla_`t` z+A#No+BAWEC$F+(pv906-M4RFe$0{R?n_X<(f6E`q8-cQL})x1ycD?xEG8Y!VVFu}6w6?4} zpKe5n-s%YswG@w_9P0D^%Al8te%HUeCVKA%E2o%O&w7@ahG`@#k_!+Hb#qb9j)(Vl zpFq+#405Dv7vAwhby)3zH4niNp275$Ls;p}pzvN#O6O&A)OS`f%;U(`;SS655ucr?y?v45>p>6n_2A(35q)}|wt4MGmh0a_m^Kxw zL70xzw3^+3`(NLDj%J6)v!JF9&$|fxu|whC?D4^pHy$pZ&k?(S9E_Zu(K2H&^xxS} 
zm?GqACqdQ*`Ozv~8%`cCeUA(4AybG}?vqu9F#rPVn`(At<8G%uY`879)}4!+=UXQN zPe4`(T2Q#SWIko>0g@Xa{7zO3HJ^pb%(s=C92{N3c~Hpl8*9k62oF|AhjLyj^~{0q z9+<&q{aagTOGh zBka%ss$iF7?eVnen?RS5PFBiGOq9#o_HlT0t++r-f0b%HFIWD1YP?Z3N6L$b1551Rg%L2 z?&q`2%e>W!_{XXDj6a9k9`d@nD)plbOAJRF2vEOl#NGAhRyFUDP}fTEB*)?0(ga@$ zeCwWbcy|e)HZk)|{huN^rx!}&zg8He#U&yb`b%`pAKygUF_i9_$&Z#Oa+TE}sKq`! zAyaj#&A%uo^WqzjhA#dhf(045k~W7fbASGAxyngDqHfM+JHpZ@Od86NcG;1f(5c~8 zt4Hu#=~;-XJI27H z0kwf|D$yIbSgoKw=`kNj+Z)b3?hdg~)8(d{vRA;X^)74F8ngqfWf1Q%iFZ8F#IZH( zVOYC?*T(r9iu~|k&CY%c`e)qLGEygj>yI0t(D|kgLNhc&rAnPYp6_psJk|#IL1YXi zgfUV)asgG7=&+Yyu+%}j@I_wEV7Oa~a+RU6mLpW~vlKlROIJrVP~dk=iG zAQ`?F_vaBj?lJE0h&KJNM;|+aZVl50i3fg&FsQorZ!ikv;cvtnQzi`q_$u%7XLk|L z6N#9Rv#2J7_MTmX-a238Ig+7`#>}jWK8Vk`-qRf9IGrya8`nTi`nPtP`JJlh++a;# z#VdYp9kc0iUBu#NJPPU23jJ7Sb(%$MgYu2i!?K6Y}n_N(9$0xZv_ zejJ%BIS-6kX;(cd3L>pabXR>^?iaRyHmCb35qbd%aZ$#Dp(fl+0~oW?m^^&hUl{g= zKY1-a#(dx36W$&6M8L@TD+~Kvpv(rMWblg})vhB`sFD3LGHO*y4Ocj-(p3D2A!w;- z6UwkpLj=iU;ai%zZwzqi6%p8ev*rACoM=BNM0^484c6Ic_-9V4K=Xx%a{fnbTGRBZEbXYT*M^L{KRteLBlsF zYPIlsj}P-1L8H`EPQd=Zl1@(1Tj1YUfyg5Huot;)rCdQ4v9IY#M5SMrQ@PTLSUhP?-_(kf zwxRH7u3(FI%ArH^x^D6;1zB z3TH40Na5?rc28t*9}Fz}=teEw2M}=4MEX=UaVlsfX-L!*hgkL4x8p0Lw?>ujC62UN zh4b+e&oCZ-v$}7VC*w+CsCx~d@FWP*;;jI6I0xaYn3Yr`3mMN~nb2r{ksQy{5hFE; zpO*PH%d_dZoY7X{9$ z*=VOyjv}{xGpO{ElHBR3Qd9j}t}{%X&=reRQo^u=q2F|QniR3PM9-+PFBt&Eo4RlE z8d2Kpc~r<+|FS#e_g$WA4jnE;{G$|pz5pc`Vm4wzqbtGeB5nlDQro8qUpmY^wm0um z)I+Jpg`Z{if8)jwMTaM-`KFReHZ4@Dw1Lax>Vt|uB$9pgu0+m7z$3p8gz;O3X=p5d zFVu8cbZri&i{6Vy3Whli89!?Rm1TDgX5(F-#n@0dFDXO8=&UPqJ_4T#wp!T92%i?c z=JfOMxR=zILL|h8Hz56MN`eVjX3?rVMr zAjr=8%?x#%gPaA+vB>l$__`^~V}Yo72t&cxd11ns;Wja)|2ADh%;O#iYLfncG`)pi zlaKd4ObIBB$mr1xqdP?eL|UX91f;vWySqzNq#H)ZC;bZG`ktQbXrl>8sD3W*XiHYXMPhtt1@nZ{j zeT!PWN_rhDN~!msO14~VA#kq_&rD+0lsU*x<0q9hOx{!S>IVGfa0PYj=7k_m#auvX zeuFPBv&9-AmKhgHNQCJT6g8@FUN?E8P0#i%g6UL5Y*Ny{ne;OFepP=$TJM9&*V+ur z6n_qR7W5YR&+ATbyM>>Lmgc|)h5*9z6T>hJ+v)LtB>Wz|-{nTE)-;C!Pwr>ZqvHW% zw*tT$3$_*11irzzZe5emO`Om3Tf^FZwo!dDC?S@|izEXhco^JU``PRrxMBu9wHEvQ z6ZzZFe<9rQV}a*Qkub*Nb2RFgBMlB(MK8qvSK-`{lUK&`i@JWjNcS$8SGoa_t&)FTC; z>ph{ghb`une=AfpYe?vQmi#p1OhpqTK)E*)(g`B_X}#Eec?oEdF8s0;ZY%VEC7DmV zfmSk3?I{C1Zp7D&18wS(%Icz+UNCYn{ZUSGC$Q;_{Zr8~B-cZ1ksx~d8DyY~Q=!MW z8^JaW$@+IHl&(CAp-4Wp?Z)P;1bJ}qdHV}~jl;kbkiK`Ri^XNhuQRYvt z+JH^0&yRm{B}J<>S0=y-kHGz0kyHr?2jng6BgdgUva_0F3)S?Ny|YQPGNm-gF`Hi zVSfa0>nvEL8E(mjxUbZszfdsP2X;gLf>n_;L;uP(N#mEUKB`NRm$l2)M}PEGH4ELk z5xU=UlVGIF_1!xIn2;7!Tik{GAS-B7pABu@X~R(lsg$W&v>PE+;BMZ?q|vE_3N%Yc77NTg5w14L`Xc_KK(%y1p3!! 
zgD-pZ-8_)vzp-GB1j%JqwXq13v^C-_x$kvkB^UenE%k2$f%V|GFmmAIXZZ9&?XRHh zk8H#$wvB`v+JQZ=AD3fJfv5ej4TiE?<`(y&f^kY!!zMjlfs!2A4HIWbLTiAlk&pL8 zkfL#d_%V35%sPwGCE~iS1yCztI&T;R|GC*-g&Tyr0}5StnldeDL3nR$db0eqMQTkQ?%%1r^VcZyxMXNvbPR(Z89 z*d(X5I&MeLvm(z3_2p1WM-U)(Urgib>17;AQyFKT?{jJ>4_QAX zTr(#$i%q)NW9b;~5YS{7$n1Kbi9 z`d!qj;&)r*1q~EBuIGq zVr2@%L3#qHpgZX$>+3uewWIke+eg#>dkkyJFQWU#NfoN|I7b)Bs7Y_Rl)@Nl-3LD9gIzT?*RQKZ0|#ZZ z*@hEM(6TPZ@9enELcd}N4ylt3VD6E(f@KnrOtI94L)o!*%xP@|lP%?3C9XrI{S9rI zStiC|rPi0v(n#1*(~vScMr=&64n&Ez6BFOwQ{kAKqnY6Gw#q>I78pO4r44LTSf)+7 zgU0=Pdg5A53=5GlZmOV-P&TTWXGD#1`726nF(hh3#ee4$`Q4hE4Jcw7_Q}w_>>p3X zE~cA_(2p=#q{q>Vmw%Id>k}U7qK-6q0~pvI_pdmM_;&2L4zgg#*f!U>gr%R~wl4O5ok-MglLp!x3^Vku$xP?aY$*I}u2ylzuo|xlkIa_8?h!a}=YuWJ8l-88gfoQfirgg~ zE{vhjDuoB>>ASG~p`aVQ)y(gb9aiiIZ7$Sne5Xd>Dk-D?+DTstG~D zK-%G%B0nwBLc?rE6^~4I^8n&B*_Y~Eu^lS7_bo=FB^$j>8Er>Oq*1sk$8Q3wShXL$ zP)A=7jxeH-z0ygwq`lJq)gVury%^s}Qq8?$nXHB``lZP0%}dX7Fy@`v@2+bu;uf3N z7zzs!{9Lkh_}=dpf-0WE3mafl1oOQ<=<+Y-uxVgY_zuDX(*pkX6L0lhCRY^c6BQ`sEB}nu43~JIlw@7A*vnhM z3;1PgrMWY%Bp5j>Ux3KYT zvFTh6FV`DmKQJi3ABnf=ih(5wEwa)FP-Yn2&%=e6g7zw0Da5P`yfTiP2F@AIN6L~i z;%a-PudKLL7l|9fMay=^@<*j$l=jfn*XsOxr2D+iYm3e>`ZoiCoT!vLmsG_y#?CT6 zz*j$vXzNQ8SL!{-%>u zko=NIpQo0mCm9J`F;4nUIxBNl3jEFgA$KLJ4;pfv5;PLvyLx@E_Lx-E7-;&Kj}P%S zM$E@0=HqMq9u}al^L#9TLV*&E`B8yX6tzDCS4{%#7z?fMe_B*Ng}MB!4_tj2rK;^Q zdBtr582?EApjO+}4W>w9>Fm1wxUvqP8G_bMb$l5U$}>lzjMIdR#2B9>%$gaV619mK zce#mYD6+553;{W1{>TAS4{Vmun?V8i{7WZ8C7tlZlfzj^(c!uwVlfZr9Sp~SmNS%D zNa^fSxy>w7(gU`IdVm!3uNqwTwYwULHJSG8zMw~X6-K5Mz0_&MsF zD1x$9Kc--P4oeTQKa1h_0KDINjauVG@yHX=u#>Du0Nv4!6Sc*|W}HN&4MG1z)Z05R z^Rl-${%yYaWvGp$>IAXqCV&frJY_-y5|wVe#-5m13@h@j_m`@XgDXGZZ2|7NpC-{= zexeI(4gX~wXAU5KM=}a7{);vz z8dB}~Ru@@d&_|I_?aCF1569BldRs|NTO`Q3bqDv>!L|mvrJ$^=9{XC6cwN_Q_`YiI zBb=k7r@CKh{^9b`ud2aP(^Z_pp{4sE`Y^@h_q1>g)xCxu}CmC{KOs!1tiZ_ZJU`Tv}B0wrkA?TtJhms-M6F0*A4`AKh|CT8KaCJLrvXBQF-zM;-8z9qbqa~hTduws@ zMDiK7+Hb;RV27MfDbV;6g1=H|ESfqn9dhV@9wLP4QvJa~P&Lx_4-*&Sun?QV!|%;C zzCn}&QbdV5my5@WoBy_z;og45Z3`la+F)!g(sh1bWAPJylfqsgurgBG^GaC&<&Er7 z7*9#fInKA^aRD6lmlQ8E8&J7ZUeudvFkabo!`zPX%9HQwqIrkf`NFSHjM=HSm~JgT z(w)WR9#rJ?PiBU%Z$=$!-K@La1wu9_zx9x}#;ciEil9iL74Vu; zX5c0B2R6v_avu-BWX?Tp)cCgd2OIK;s8S&>WKWJUeCK#wu&wY(oltotMm>z-&iThY@^NFn2`Ob0hw` z*BCxQ-d2yXznjlmQD7JNAr6p1dFpSv_^-$#2u>DP^w{Y(mh=c;iWpjPv6<5CPMVB! zSv;ycO#vi<_e{9|a4qi4ywdsqy#O5B_xj9Mhu=T$7StMFl<<%D1l;Z>z@k#_w$%(i z?K-SH^jeb&U%Zxf-{-6m3Q#r*Sb4mYKmP@5K3f$Fc&{)$(|R5~l;yvX2YtOK1vi?! z!`W5n_W6rVKix8i#Nu%&(76A#(^i_Bij1Hsd4(us+I1L5Py}cW7G;%yN`nGD6=1aH zq2g8FN%?KOr;EaFSIUmtR}z#%CQhwaLGd;B4Ueu-Tuss|QzmHi#QipQRe` z>U_mSGqU$b6+o+5kan8xIFy+;H(NS$(Hp_P##8R?R%1Hvqc|v*wxz&q{CevH(c&25 zw};QuEFXbwnpTo+-`)$jwlv0@fW(1d&vKXJo*COec6$Y)iX{XwX^6umCKDB*Uehy1 z647^Zz>hHbkBOg~RFjW;hBSYkuIDA6c#@%D(68aPn@6BNOz_T1cyay+{zmnOb~vYp z`-Xond)84(xLVC%(rYRCb}DS zxhs9jIjRxpp^l;mY0mnkpeiyS_&ng5PC1DM4RMXsfS^b~alN=iy~fy*8Rgx~7_0^> zeRq;nmI^K30*soKJN(B5nNHop@8@S_zHa-|zVZi+NChQYNlz{|bqQk(zX{<_l(yV& zeG2l)!u==t^k8wGw4W9>R1_3+l`%e68`QF@XU!+%&~1xxr*`SDkNa#Z71XoRban3P z?UuB#v5OKUh~_nHO@7{N056Zy>rs#VacN=_=)UAI2<)4amZ#}+>w0=xS;*K!j4rs3 zH`+Nld1W6U?GMgqFZqGu3Qlhks8ZYxS=22YJ+}=R+YPHF>nueVaDT(QVPN`zKL;P? 
zmXHMZ?t+wC_o=_fZ|p9XlV}bFlLQXMy`Ke%)^w+h<2P>2h|ksI;y4!pcS$|IKAUfb zsy(pJTjIbk=$_jQdvX9>-GC%B#Pq^S)H-#n!+%vffq}-IS?8V#0*D(LY-V%45+tN6 zcoVE&+!vl}gg(1YtC4L;Eb_(rOC<=_OZ}P4h}C4k??KY`x!s9W=F=j*N#0e6A5#(; z%cRG1{|i@j9?~XKOdE`jYq^Z}*^Qw^D;^}gqf?4Oej4Zsr9;%Eu~q$09yKUNIAQ$KFU?!kGw@ zCZ!jL7*XA}@y4Q=EsSC*Sslke(vWZEg(e&;vWybPiLguOl8?h#?OJZ+;^X7o*;wDi z$Nj@)ep4s04d{HRuN76+BRsSi;%PQYtu7HP2FbEG-9BMARFr2^+?b(35Eo+XghUJw z+WJqmG6?{Bht~g&&}>NE1h@FdNBkFIp_>ulG?7=&dxn0H;x>WpK#98%3-1|72dwX= z{W#u2G8X3?ABL!e|99fCntQS>ytr?Tb7BWQY~(;C{xzACEW7C(o8T{7K&qF=@(o!V z*K2UboOIGlSTBmVaDOEINut`V)``^Q~#l7q(n0nZR2RQVCFzB}0Zv66G>MF-W7?c=hU;L-U@o`;kd$@V-ik)IA9jxE%$N@x6^l?nK#Na&ZK`k`(NRp^bHkL zhGF`0jVLm{3Th7SCyv9x{!&Tz8*{hL7ar1#GWgh5cYsE8u_bIkD(gD2%4CfC)7?n7 zf%qCOpGSe(D^+x)H-o-?(Aus4S_DL*p1z%t)U6fdC2!xMS44=@o z{&lYYThfVQ+&uT8cwU=nEzZ%sx2Ai(P1eBUO1CLb;lgKue&(_n6@zkLACOEy_aIA{ z-KKJPxaQ#(kQy7(A=KAWpZdrw)gRYJ&3`OS)~9-i9AZ8~kkC&mK6S*(R$U~AL4r4^ zW|pYSJ}f&PH%8Sw!gbHF1SuGgZinFt1THb#kXZ&An8dDeU4{$I)mJ)?#j)m2y=-Jl zmKH9sD_CzKV1FFW@~u9L+Zw!e)sM0>$h7l#GI3tIcU?y>d;acn>}$D_anxgSN5%ym z4>#gNCG+x>;Nh6Q1?7Kq1(}IltrU|~(+BXBtBFPJ@;{g37y300Wntr|GrbPjrSqHA zGT)tcAvn+!4%{ z&8#5?q8zS2x2$}{(GoakfN*k}bRB2HDsY@~OCvV5x7QiKcCSq0Zr6T*iM?pOMFaO8 zPqftrLPv(Y=U4V7drJH<P5j_iQ&Tr zO-E4bd6?5%K^gBwS@YemO8J61H=or$*n8{i_wU`I(uXWVQ{Up5OFob2xFPtS!#jDf zUh{=g1^~W5zA*>?WvCM%`*2MDJi+S%?$&()0ALWL!jwBCfwp3gP^35N(*RnAnL2!& z>0&IOt}=08s>#})HH6geD_M(s;JYWyo0nXJ9xWNbNmHBTL?<2_MbOE3qD_FP{PTd& zkiRuibgK{{Hf!sCj9j$$E~8C;16vKb^g9aIv820TecN6UhBZfA&+N3o7btp%3u)mE#;2#0mK_ZUK2La_18L@Oun&m>1V?L?EFl6;fsVZ2tSNH_d^KNwy~icdvxyRDlp6mppC7qYnYo zV&UQrx(5yfun?`iNbr)|)Sh)3kf*7YI^Aek1&g*5t;P?1Z7`EEKXB^tP=S-3cDUaP z80$Sko^So2y#D7m%2WDMKBGcT|Jz%&-qYgHE_I4bOu;owQ8ivjlFEw=1Ek;YK;+>i zJ+Y#+mKqAzs1t@P(S>UfTNnX<3C;1waCOD+>(JX?pJd%xUh+r_3~=7mqC(L4d0rFb zw!2WIpe|-`z6`0tw)FcOsPsSJ#P2B+(uR0*ia^A1$Ek^%;>^ews#;T zG@Wa(rE1^S6%FbzjMaTBo0L?=+xh|`bV>K?O9PvvrC~h6hfFht4SB$?_!TOOIo3K- zRUU`=X0lP^-B|`Kp%9WUY0PFvZpW0+t}F)Znd4696&mn1^m-Xv8C|v}oI~&!XVpeG z%?@hha$P!WCjCI(ts%a!INo$!a+Hpt?X=s%k=BmM+xp+I)VsP(A^wk7@W z9luTqxIqyU z+>C5X>7X^FS~2-{;(7i(7OK?j89=I=6wrR6t_m?KHSU1*Hf#kO?OBF_KV0P@0duK1 zapA|Jvo*uJ>_!bI|0I9@(_?N)q3x2P0%UbX zHlrT&2I_vr#;95yV24#RmetL#gb~`{Q`%k zZ0}GVn6URoOSe!1MB!jpU<@L^kV`iD1x49qj1Mnrk~6xWpf@NO7kYf<0VF}_)>}q{Wq%EP%%J<|QG!ZUjQgWp zZ7wC&2d4DlFZ1=3)ET>N%YZYz^Ql*@7QScw9=1<0cpu6_{I30v72)hC9+*2y#nM=- z*K<*eq@6k7X19@lwjJ^}XCl3}!7v(wvLe(imDpS(`+j@1EvRU`>#|TLj(2UbX%)E} zQ->5mq=#Xngk^KykPL#L?Vm8Uwg&`A+Rq2BA#rWGc$gFqDY%t3d?Ep$BB-A>UA{?8 zQCGCJcG*j!naFnK)xIV@`N2Xd@o@e)*3#(gRmF9Ike%C=b~(L=0j!OU-d~Rr4{lpZ zY;KGE^S4C)Pjn8G`d`qU`Tj|PuC|GkH{*xkTzJ!WKLOVEz19Maea8Cy(zjh;7d8A~ z=P%PWSC(|w;A^jY^CG*`-h$B6uK;O}&3qnT$Yml~EC)V|$SQMQLN8Yv!w^84Vf#b~?lw}yHmEbV3Hvz`Uwvd(w5*<|<@2|u;Y+WO!ug?Wwx_^0bQ z*yw9r7O-Kb^HcS@YRUmHK7qOBkPz7`LYrKH9~e6|1#6rxMSF>@M`b>nXSt;(8^ghOBe5zIsLS1h540Knvfr31Ga8T<8qmIchNd*4$N5Sv z+vav7Ceaw682-y19q^{&-F8T^M+8TiLcr_@o<{9`DYrM5BanVlb(O>SSo!*&{j`xC;I@sst z>A98DB~u3pe@|F!Z4-q=Dz%q0CEm2FlG?Fwo>4UdK5cck`Tf+LKK!=i$wE!vj`tO^ z+$8^at`J&4)BfMhUhs=MmWyjTU+cT{vK)&^ZYmdE%>eP9%Gw%_Y{gnHO#AX>?_;Kl{K#q^Tz>Kf4+~u&pUGJE;IDoK7mf7-}2Q{ zM`4B-IQCTj^FA^v=;_FQH9J9FiTiSL`ei!`1+lDNNq%4Xp%-&b_9S4}4IC7S7`_^X zoe7vp1oqV1@|hU`R9zF!Wj}NYLR|N6qfh%8%L=`)V`<}=_#9^cTg)}+GUYFv%60|6 zo_Nk(ts%*rpVXg!q>*OKCW_j;KLX4~071#{{am}T_51*p_S+JL%5nR~S;9qojJv>I zz>x+R=XqQQ#EEq*)T$QKj>z;Iubi&|6&ZU4kVgcq1vZ$T-)^jXKM|=hH^_rUk&x3` zH|dL3C%PZq(dOEAl=RNPLdqKzM60;T(tj(x>tJB#e&U<)+tmuH(ATiyRWHtm;|9qm zRv74tP2A{l`_AI==|HKJQ5vsQk|1YBxAs^Rz{6NT!y`9ks_M5p({6_$Ao&+`# zemsxza?^{#5kuO54EsY<^RD*?UZOP$Y(}^vKv4$>E5}Sjr}6*J3+U0ovHm|B>UjEl 
zP<1tTq8_Ol+h#aOLd%z9Z%`|y)9}7(n(!6tQeW<*TCGqm;X9~1^Q;_N;fz=I_y)Ef zsK9fJogW5eUX<1F$QQpO9?vEgryq#3!etnqW}-;NV^8@pq-XxpEaJyhQhigOJ8xzt zr!sZTuCA+j;aop6la20Cm|4-gW;xPm-tzu0plLo19M{yN4A;X$CbM#n7!((};6$q^ z!FmgO?U7?>?l#t&3qD_Lnec}3q_6Ff3O5pE9($3vOIX7o$zo3G;w~_DqNZzl*94?A z-}*>&AAyB#u5HL6($m~(>itYop)xj(`a2+tI>o}LmLp(HPabXc$(HfvyP&<+%|X)K zTa7#Of8x)qGJ^LRQbTBm%Vs=;&hgH&p9l4Qen-p4A5>h%qn^#?gw{lPn>5YBpqqzUekG23p<~$! zwN$KI>fT)fKrcy8*IPVxQc_r{9 zaYu1#I=FCSviJ>g-ViX(pLwQL*y3H=K!W^(d2EB?vbM>a*E{a7}wL zT}FB;Q+5OIUg(6#Zn2JjC0edq?AP5W{!}H|X?N6DCn-hlho&EmKuji`k2(AK6}T1j z)>cgR^$bbhggSy6#Gktu)7xqgl~R8bg0j&%8-WZOBeybNPk3mtS+8*MFmZ|U*5oWS z4^kMUf@t6Q?8;I^Z+yvaRFk@lN)Rm(Qj!KjGtkBiMkF-uqCqVamWfUNxP!-VvxXmQ zS;IN4f7jDxYCSZkM1$sibTV~%?!pVzDe=+Kj=0;oQ_M;q+TNS|ck)P2{X*r))IfM~i5JWX6;t(Olx7<^vPV#i#Pd1{xqK)Og`~yF{z~zXH zrsY`D4)Q;)J}ri-UPscCUUHHn5IXEj``mXnu-?{?kJNT?lTV47uZuqJDN2oQ6@kF@ z{!a+XaeV~3r5#>~?7QHe_fVrRPb)n}vHE}Uf2L4{u?$hUO`lqcI{Mbn-WYg^4hOXP z%~-YBO~4n{^(Kd;CyPA(oQ3((c1r;|DCS<2q5T1fYO% z+~t01-s!wiCU@Ht_v3~8R9KkbgUXu2A@ALzfj8p6?}_}8Oxdx&cZb}jaME;~_fOqk z0K{_a5p_i`-rx(ohZI_Z@1AS}Vq&r)-fQcyO6ZwHeqRUakL}1%XLA0lIu7kDSbf+S zSgyQHGQtT)VTCjowM8sSrKD2u?k)ycS8XX8ZzzXHi(km^@oGeA`8eN-`#?ENi*30&(8N@anw!=UjU^BSdg|?mna&;TN6Q6 z{mN-D$wi=Fop|N$bzlXS!V+Smw;j{!F5@_hMN6 z)+@|$T$gFC#@%~8fMnybQNufkVCrOZS9-LmM*FrhzQ9dRk6GImL&VoK{*VScEUjAnu>F8hxaT$iMGI~EiCJu`IH_91z0YN39r zuWsIyH9hgFbu;vn^_X-fn7E!0+#~uylF;N9IK+9WsTY->8h`9c@?oDf3-fDD?4+P8 zYHji%+%I~5;V0Eld-C2(`*=T`@)u71DNQbrZzny-vj7j;nfKZz^bJs-pVaw$p%4j) zojBjtZ(r>Hu;yH;%x|-FS{(}Unk@P;9JJH35!pGG%i#oi4#~s#H6V}LWzzcP>(ZB2 zI-Y9mf9|((P`9?WxLSK~--{w17urJH0UTTJu(;KWb{xCmmCapJi7IdYxG0(cT;cj& z>B;T>W`k3oX*F-z)iB{no9c8E?|aREoR$#V2y;{J^v#+Z2tQcrYPm9V!T;(QNfCw@ zQO$O&fB8knbA)Z5B;5WSFkJ7Ap1)!}_xpZ$B@{12Pi8~7u&4AGJRtMPx?;L;7I={Y zosjNS@xodT7A?h)N=!=9I+oUSk^Uzd1DK#{*i((L=)QrhPTk1&aF~x&g}}E!QOEF* z!ZXjCHpj|Q6MT3P-g=b7Q^Tlufsq&F(HEE8$xS4_M;IYU^Lt2q9C4$ZNb%>R&B}jd z<1gr_0E*6xPlSCCgwm~oF6X?vJkhu=#5LK~sDji5VV3@^TWLv(^GU1EuW3w#{}!&> zTEdpt2GNcJaoSDx43oOQ!0`-mB26mA)%If6Uo9VfJo#I1DY6}$FvOGe53Xn^_e$}*%1t#;%s{<~pRtTZ=O1_Aa)nhix_i&EnY<=7K# zS;g@C^2_!6)U?2fD%s^kH+|AJ{j7!2Yq8Cj;zG-5iI2+H9nV*W?I#|y3LxO#OJ-lo zwCH3%WP$(+byKj~JeZhtzLlyDL|87CHx+k2TJA|ji@_(&XYBraj{uY0Z$?BuI)Dl9 zo+@@EMsbObsl)BVSJTGYD4LXpoZCq%vM{4rxuIS8ERb2%u8xP;3pNVTYIz2hu=gwO z9f#wWEMox#5{8oxr_Io|j5u*Q2Z8DTPmf{ec=I4Cq5Us~pxK()s+)ImB4k4xkDj1c zljgJ3+C$6c3oUj0fIVZ1Mx$q{5|x2Z3s5+N`MyHppRBo(3TLIU1|T4GX}OK2vr&4= z+(Np>yD2~zIZUS!>9#xH!csG^(m_VvvzGIUQ$7kVYwe++mV%Zdox7dz3>+^G&<#QRqT{6eCpS|USp2U`Q>!pnUCVoGe>ujQ#(mga{Jw|pL@!L+#O_Vo&v2bvKP-w zQupqw!yz{3z{Br6RU7oKGl?Dit(fJ2wE}BaWyw`mnbMPj_Ag-2 zi^5W}5$2COvtFvAUt&Z~>WDD%NmPTQfpVp7vJy(FgzB=+-13IxL4P{1Uj^;+0e>aQ zY_E7)EH=tTnk!KcgZ#%Toz->R&u8fY&GysuAiqZ-e(ry5Lcb4N@dFO=r8eS+9&seW`+%mWxAsM~#h6 z_&~L1y+bY%sP>KS8zEOTy+KY`>E1hH5yj8|vE+MOaW0LyBXzO7iIhZfG@B*Mc$cF3 z5w~o0cixlt7PuNKzQ|)23P#|gk3xE_v9F!aMhaw@Gzobj{v~DpjR5UTVjw8;Uzt}{ zJp2yJb&dCo$nXxvb4oIRV|qIHqTg%nSrJKtkMg*U?@kbML>31Hnp$M9pRuQVjx@U0 zCRH1b6?T7Iq7+IJy{^XEB`8aRoH=2Ekjxt+@bZNMt~_1PMu~sbn%ne-WmU~k-emJ8^rNa8w$`ej+{1| zEds;?rcv^bqNlRAr8BrBdiG6HIANTG?})H0W>l07lt$qj*HuXBfY_Q(oRKrbl1O5w z5hI(%Na>pcZE1H=9j%C${6TS_aIEXqC#&$w1u{8$$-35_8BpSxC>OG7z7N3Ic?JKt@6rVinhFIzX^lw$Moajvv)p5kzcR&MXxKhA)8 z6+dgO5xhJwyUDQ=&58x!5V$4{Vj4n)%LhD9P2yq_&@&mtbt*0ed#VgFH#Di5g@UnJ z3CPsm&uEwz-aW=6^DZbVs6IoCebe7w4Sc*IPWDA+ksWErN4Uj}8w`J%azh%NN}b|# z@$t%+(UAJ1J-;jn)K+MtF){tL^z~EcRmY~YVuqkyEoaZ&{obchAu@Z6`7L}IaueS% zmzIg8{D;Scvz-aNF|maVxrsLd){!UWQtPuBQ(LY!dP**2qB~}LeVF%_F?dS;dmT9g z8?nQI_LQ>mmmN3sa6_Ph;qvf*>kuBHZW|;KT0Lc`|8zc{<XA3`o+r06d3#v`5KVbD(SWqZ{I=gfIWeU27WQ8Rs^;b%KU1FRCD8p 
znDkFY4f&l(2t4dI8u+vdNNIj=j_e&Kyt@-U8W)!iDv=UdC+qq1e|3b3UA(_Z55(#l zibCA~0xASr@kSbs|HM~^%4=PO6Spv^kUHw}^k(63+p0x+7o*8zc2paBntG&m&xd}w ztHpDE-WhsorgMnTV_;q@cP9LtpZCR>tk}GvK`qypm}_N|lq@ajeY$O$oyM>r&q(yg zfO>J7oleQoV0#gLTk&g=B*8XhV9BT6vI557maF2fv5<#j<{ZyOY_#yH;2b3?!T#W4 zn0&nK6@1&)T1CW3}I5Sxk-Q!PJ1qk|MujQlW z;Sh`o#`N1G#T39R@J-BqSL&U9f|1F0hpjC)J+i-7(?xcDETtUnLz4x4XIX9e_UWd8o!lk~|lbdhVnJbhrBTP8#* z(K;7+;a~j^__hmpt^Zxe8WI)o>ilC%*e#Fd-9xCNS|=uvsC1loe&DU-Uch4T0CzJr zf41)D%dWInOz=?fCYHMn@c};6V}5t&M$V<*u@Q4haTg;cMu%X$|22x3c;VGY7SIVyceO|*A*n%G+v6{`7Cm9L-71M zJdlo2h_O&;zKR{SC-gbh4JTlxf%A>$$k`QFHN(53dyk7~;EQ6XPYKf7tg~jJ?6E|< zwkB0tNfQ3`ED4^A<0uo1T*@}1EUQwy!Zr)~l`RnNmt6@o;c2{oiQLfg7d2V_SR>-U zb9r2};{WKE#1yO{SEMKopt8~8ogew^Gm-C%02^z!Hk|3c3wTum59kkM{_Qza^FT;I zU32YCZI|R4jH%S(ka+kcx%r1{5(v%7ik1vX7&?5aiii=Kg*4Sde?)=q$J&iW>dx8+ z_3XlegHvPwApIJtM2FhVhZ#m=IjAuu&6UMW#?q4iP#cN;Ci`q{PQ)!7$1G>HUQ_O{y2JI2M|K&X%VA{RM8@BNZ2s$775mge6@%Z){q79UoV@D zKzT-ee5^Gz@>~wwrAZc;Mz&!^snCNrkd4eGBtRC45dxrU4}v4F_M(hyLc)}20};L4 z;u9P}?)N_%xB_MHEw+b>{F7{||NDHY=9_!iQBha15oaS?Fhv)#boU9>Q5h>W3UP8j z2>mQ-YwK@+#jg=V3jC>OKiVJsm9<1+T${zALTVwV_R3WK;qN%Sl~Je|;6+gwH;7(W zg_4XH2%C<}y1YNavU(T(mQ=&u;AV8oH~nDbwL=W>#?CfU>=Pc4XSBb{oM^bIX($vp zZlgp#JwsK0UPUc8dTNJ>pH&*}0sV$6t)XfQ7{ks|p~g;2j*}tm9L&Vs{2CHX5JRqv zU5T1pk%h_09GLwXu^+WvOJq%DL%kPuqRzZ9AAMt%H#4&*rY}eGXCKAke7wVUfUkuh zm#P$XWhS5A=G&G{Mjbk)yQay6M2?MeM_(ms_q$!X`GINn!iD;b=0qFquGs-a&Cw`D zJM=J=t(v>p5(;mHUtXK>Zixreanjn^fUY&y3CeNyJIPn2P`U;Gr-{Ww20>iJ-Po%Uf46CpMHbXFVYVp7 z+I@Tllnv{Q*A1r^)@#B#AZ@z-5UYJ+oE5@aU+-qB}d|v?x6+T z3Yo$Q7P&I{vLo3ek9AzR;bD0&fh0_vdwMSj^=_60f%Hhc=RcH?!TjPq(Ju&bjWNcj zcb{do?cXwQZRLb+-c{oIQ5=lAu!}P)GfmkI_r#eb^bp5{{$N6X?LR44KJ_?A+>`VJa{Fs85j~-IIj%M#n;&%Ybr;P# zR+PWNf?YD(dSdqf@pKjrQMN(5r=+E&yO9Q^VUb2kKm?U;q+O&N1nKTrkW%UHU3zJd zF6nL-SbF#D`+et}f8n|BXYRSKnfcAjoV&@C5|<27&GJF21(;9r9VSmD!%?E3VGqWx zDF}93bx+b`R9>^Cf%9B~W?H4V{e~<*XG;j%js$xxBePkbIJEU zsbRtB)88W|?PYgQEiNP;<@u3;Y3|uqCXKY#l2I1k;>w#Wi=x;qGp}(F~<98HQLhL^+zSRqC#T5ljG5(cMF-Y{Z|Kn z2sHhSePyE`p~P&`QM2>fIRlmp982Bs#$X^^eWN0B{>hT(8Ak!+2o z4ii3V1mUT&dF`_RYiC%{88`%E`BcbF9DRAycFdP{3ng9 zv2I&WA9T!2nm`$6Eh!vJ9jI9J--A{f-PP@P{LA;jN#>)a{T~mkts9h5LnTEFQU=C3 zri$hRBUEmuiMKF3|gNz^H6iyCylH;FVQ>S{D~r0Zw1?u^UMKZCXl@5%}+Xx_SkSvx;_Z2|613oV9_@ zFE)9QBlDNjy3y{kMGUg&FJMkWrv?&h;JWb4WWzEJ{@}@-KFm~#zd1aKI%M(wChP@~ zVX?hjy%6680+ex*o>ybgtX68xU9Q}Nf5mvC9R5l^|B6K;C+o9hRVyRhL2Re;Ow!P`RIMIas?}-s2S^xec2%c>9(uc0w=$SphneI&8zcv6Sf(QOuc8&cNiw5{YTIpXsy0?0n9w~ z?@Dy?4H+G%Mq&+CmTl=Ua>9b&vR3)487ucx94mK!7tz_aTKS2qkoMSN_6!ZL^+)pI z%hNX`95hr`<`ZMeG)F+0Tj$lL<@BNq#^8WS!v~P*JO-|J-}i^P5a3C?GCx`WJ0=e9 z1pO!&ZCO+owg;u$KZ$PcZHXlQsfC^o_Xj&R%&`h8_sMfD0|`yPn3{~`FC*$QRT#H; zT1gt?sXygAf*O2v`6k=qSmZ~4@LhO33aJ^ZZ@&kQup?N*iiMTx2qY-^uSgJodf*h3o>E$__^^dpX{2%$jiYx!dDGSk+q(ZDOrh}GWF9Xyug9=~ zQr6wscGBg{-g|cXFW81ZZ`3*)QrhA4UdzlOI0x-iaZ1EphgV_mPj(ppfVIM_oVzsM zwJPlfHVa_UzJhCQ-GR(ogW+Exs(JK#_luv$#SR#+ox`NOwdR^jE@ZGNqW}*G z?;drjeB4Hp{|r!_&@LJ-`cK-u0NrM>Y~169fcE7>dzGe>FYid#&&vJ3j{I#Qac^3B zXTqIQ(6QyCx<8I_r>ZdZd5t8~8S9qs?dpNOm<#gNvY+G8Yb;!g*BZ*X9Su zqJ>qDLyH|~{t_dvM09Q5xa`xim#cv5Lth(eD%~S=$>}wg3L1J~@B}LklJhW{AKdc2 zKa;OTQ()$oSZFU+Ku-2MiiKi1kFk{Rh4RS>W_8UUKa5$X60o3`aQ=~eR{AowPYl>B z;DDkv$NpB=h8QX9!DLGFX38p#^~?y}><0&|HKq>K7I1oHs)jNME!TG{nO;5R(ZgXq zxL?ZN4$O_oOp8raUKo3x;2AXPr4;bo-JkR3uqX55Mw2Cf8Qk=He_?7rOZFYxIl{(J5yQgolONXN99Srn0+jG zvV!*h3M6)P-|dn|%G~ik$;|VRu{Z0ozla%XkpekvOV$yDA!a_~FqX@fR+N?Pg0*_x z^J3>t^%PK^Lp@Aj^J5Z4gn2LLjVg)8UoO80sLvL57G|koV0;D#Pk%^jma%eBEdDPu ze}lD(9G=}-P=sf8zt~-Uh0M%8VL|{QN4wqWtmoIfU^9WvyL^5FN7#QaDlS{_*T_ST zk5|8#cI$DIm7b&4*ec|)xRcgKm-0k-}^XbSnVsBky(+&jiS 
z>8&|6U5tX$Y07uDV~(a@(!J(R0aHVRR5!Go4mjoeh;AEQ-YtvaY2l8IG*I-8bA zM<1H$fo!7ZEBV1X3n{j_`hIe0p_J79`lrrkZuj z=#?7eoPRn5a1`zpF9Iey!9RYM1r=-1aTO0(oE1hg@$VVCz4oqr&Wds5Q5btkku*nj znQGP9oE&O&*h$MWPQUx?6PVg&vnu!5`{sDQFHT2uH+cuvL2E0 zby|l9qRzz$4qkkISb|$AvhL?TfBqWvOo{!MXgkSa5WFjT#6|oCu9p5t~{ z@1XB|j64%R4~%aQ1I*{ki;D1^52R3TEhY5VgF%Iz?;_M>_kKJL13E4umxv2K>f~IQ zx@9ozra`=)+;!G19tb zNpb@u!`U$d0Z&GeP1voKJq$K2A6e7=G=rkvoEj-}sV7CyhzWoX--=BB z@at4JaZ3u0W$^2HYucX%^yk^*E;j@qW2JZEAlI#KOYbCp55D?Q`dWh1gyDI3q9rdY z(br&B+AneW{J)PT&4{JmTxP>ToIRftPYC<EAt!8VI zG_|xwzEuc1RAYXu&XY^DgILc*o@?>lxrVMZ$n8{oDRh?>!A6-_f>-TbZqFibE=}n<D!Wq_!IOaI~8GNlPQISTr{C*?&74G{+zS6`*D}Is8 z$N>043EzsB7uA~rfqweZB>Yhp)$^C+>++!=?8`a+@mq zB>V$RvEhJ=vWm-*ITTA@xGdxkahsqKTbu&<1Z4C%uy0rBSf6D}kahN-H6N!KJ>l@R zDYd6#vB({ql&!Ml38;xd+mcrGl)}b|UKk(FH&RPj9NVA>%HT*;_anREBQ)Ztc1X3A z(7qc&KmQ{(7mpbd%o^M7&^mE5`-4r^nvZmPdrvYYV2b0@PDQoqOZCp`wG#So;#DBY zJ>%iVeN}NH>6$K84xgu*vfQ+-wETx(2?jr_jdSm*>%V!n2$-?^j)|f?r5^i9qUIG( zuhxt7b_xx!nGK(4^M3UzlzJR5VdkrU8ph}|4TFFqNsu0P78=j=;6&>bPOPyROe)-| zgBTv5A1K_-4Hekd5|>mrcI2azABvHy$9a#pXr%DJaZB?#`rJ$8HZ-2+7Bo1Axzrkt!fL-bOOQ%1h?lJEQ$1!49-mpTkGq*x-VK2y zYda1XJR=hpbqX&&UbMD^{k2>5`1%oS=C``dtooE0w!}b%Eh(3?wVdPD-huMU7k7We zr;ZY}XzhU@r!Ag#SxM1{QyMEX|BnSQ=WOfxINt*egoUHvy6_b#MXh$!!%B^s?xr!O z)NIimzw8YdU%ZlJh@vl|CQ&3;>y1%TXR10`KPHPTlb1TJ#$g2OrLgL=NTQ1g%`J(Cx{7zG_N0j)kbGNTZuQbduK5< zL&|Qph!eqcT1gcBtN>k#O0(TT?-dkMl@_{Qq7|(_b5T~Gt5l=~wk9yS+P?4oGkOzu z20~m)&%>l<@~sIj`PQRp9uW`3&;Q0dC0Ey$oYXN?`f)bo7hDr~nK5U0$90`;D8$S~ z5QiAj*VjCDCsf2vFzgN>lUm+IOxAYY;#3xtml6;_-yStRAQUNu{;h|eJf9b0?NvJSXjy^s{1n^Xv zvb2@HD?#LZ7tzJcRzP67+3Wwp_*{ecfiHE^>+j+XG;xUa?JOqf)eFu0XQ7E&?SIX)&iP7eTzl4iTI>ggwxd{y(^cpv4$u%o=Y>}au zD%|vVm;zl%jBhUq(oJO8yLMhpr4Wi+^#3ptX+X=xu<3J}?G{sog-bJfN;KktZ{Fvl3)T z3?)~sLea{Z?HdO}YX!_LYM)i(dEnRiPP;a$kcIJ0DXksUFHBFzeRKY!dbZ5ibs-$d zv@P|H;`>*+e{xo;wI#*C?C(6+!7;C1^nB47{S8VsF)n86c=vt}HloxD{@&n9n_!aC znmeHK70-{#v)r7B{wPqwKr2j@tJ*Y~@-q^XlsR8Q5SQzcXMK#`oG=p*)i&l++VaW7 zKSFTlS9Qjy)Q6c$e;SxC=}yd4C={#;h#cKjX*hLuiLA2jl6+D+Muqzs+Xwx# zew}rc#*Hp(gO0A664Zyk)$MYbfYor=2Fr543dcsJM=WaKd9t4Q@y5?58ts2~o|juc zfLe6Cb7=q1U~D1_F=X^}Ywzv(n0mYyYuB~RK;^;CgI!Dl9_@Uuo|!fE_W$Y&f9U0GP-V?ra1*RZ*iP>-jP_0F zsW>bvwJKnx*;XJ^i3J$wn>peGs?a0QkLNPL6j*lY@0<-xqXqnCCgz$Cp7qz>jpye9 ztLy_RJG4I5xmDM>t-)`n!Y|eRzO%r>t->n2U$V!tm~v{gjMJ$0r?Nz8CeafSd z8ZYBaOP0jn5NA_X@&4fYV|*ECgDEw4Y3hN1+Z%ssa!Bx4+$lAu!ZM z57n`?o3vDM?2r*Qd&LBPAAjOqL$inuFrm+k{=_kEkVH-1U|Ig^z_mr8V3_-1nZI3* zqn^xM+-klqE&rAC^xjtUoCym9!GGK*x3eeW^s|j5qo?${{@a&lLT)Y=@o(bGSFmjM z!i!0eqaiaEplWa+NQYsJEsIYz8}E*&y)e)S52Dy{_K!L~iZY2U@Klo1^KsMb|2Hsq z?A|J#Pb8L)cy_j@X!dJQXE9*?hdoG}27R5xjydVn@VlDAE+E9U*f=G3&Fs#`KMK|0 zG&z37d@3d$jJ8G`gxujB`3#bjXzWVrco(rapt{{bm0)?#%$QMsGky|Rkr$Ga&=q>O z{8K*Q{T*;c-4^tpJniR{e1}U9JZHQwTnHp)_*LVyPB_twygn>TWr8}-eOe|70m(SH z-whsl|LyB|o)ssehkIO$+H_DjFK6IoaJaS#EEZc7n?@<$QCy6w>K3D9RQk2>gbpC! zX;gMrdSw2COVUp{<=h*({XM!|ZYHGlZFvMvkWf&&&(2`)t@k)1qX(Vec81sLi_%dH zS+Sg_zEC`;18I5G#${*T^D>rKITq+Mz`vG!*;oq0dHAmmToV-}w_g=uI#77j^(?;S zjH;6==EJim`I2BFLXq>0@V1VG=UAQXsz=V3HU`c56gQB6s=sRwq^J)}3?*Ti5Khr! z@bpx0343f*LOxbXZSX$#YCuR8*divQ?DHz{vcvfC7=KYx0wLr##4a!YDrLeK_bwCe zDb%IO;ernzQfY`5_c7hbv_zVEd`dKG7*LksKnW8alzYp?boaTH29aQ}y6kCTwa((U zFvLHjY{B7NgqS;n-(f1_&AuK4-wkbfxEg8|w|(Dfa+lkR+_E zpDTEuBBgQFL`Dic-+5W}Fw&o4B6#PS>4!DmYn4GgGMaZxV;sNuCBj^Qc96#L;+5Ap ztTRElYz2#Eft24*==Vs5jUvc{ zBu;Nev=X4Wiro_@RE~Qb#ntjSMyJgU@LcJxZ#yOa!>5qP$-Ond;Yj*BKZlWD&T7TxG{OH)Z9gWeSXmu78z? 
z13MiQsrjFcK5LxZHZHx_KI0C7t6Yf;$|Q;K=NfzZuUa5Rx{iZlp+gM)rj41d>#14u zNA8lJ@e^?KrCNuS@{V^iUc;0Fs#rQrb+RjRA`T`SW=E*n7?s>6$s!ks#7jGq#&tIh zY+kQ-$65A(WYq%#{>rEEdzzrDgifl_OWs(0u`al`UsgXlDAN+yPuo&k^iJAC8~&I) z4ZTti4E7!tj#MvY5W_)7%W47ICsuMFhiREo1wy~;CH}58Ul9!{wp|{55tx+bqnOu_ zala}23)b7|!7=JT6zZ^0D2pG%#Blxhr)ZMDf6NCPXPwvrcPH7A92~36!)K*RulKq= zrW#IWbUxtv-Q*#S!yb~Qq%7DzEs<(CzNM-ghi3-a_Dr3SPI*5yi2Q1~bvmELYGW8n}rp2b0_(wZ*+JFGU6b-VVU zn-oBJ(69?~5@3?4U6w~qb!LFrZ!oxMUW`b;`66z)K$41Hs0l98Y*W5p+YF8MB#%?F zK7C+=ua)@UYjLG9_1uZsJ8ui7vaI{FJ)kPk{6UX@tcJlSL@Z(HVVD~eD6lN=nSVF_ zi=vEU(F>Yz%nfs(W0@%`NpG2K z`-;|qeKEh*dWT*%-Kny}w5)NWD&gB>q}RNk*kZZK!-A^c+?yuDxmqWUxk-GsWO8a- z>PXU2NpyBKR+0w-gMc%fVD=u&)tgnrCpT9wLqh{YDrvmW=)A3x!0zQE9o zu!e;PE!YM53*n`dxW=;RSZi}`XVsv~5|`oYE%^ckIEjGyAy)UFGpvdT2{J%9^L_pH z&c>+&Mcvx+gD+5{V?}H%>9j=eOn$K0JPXy>oLCJu!+qy9DLXFeytz31OaC$wk;$6H z?E2j5r0C%HuVo#~>4BmQLFaknS0-z_NwxX`Umr)5B%{VH1Ur()3#+H_tnsL5{6u+9 zD{wnXZTySvzo3ZUVQG!_McEA<;9l-U`8;3QOxrgJ{_l^O^-I9pHk01>av3Vjqf8FC zJ`)_z3p3jyITv3*Qt`zOf3`eyAQKg69K9y&^r*M2hjd|FoWCRK#?_i%Vwl=wlel%< z|1p<-%9xLOf-_R~<_+lt5yM(9`h@$0pH-*ES;~tjIuXKXbQz~RU-h-g9!{@z?$9_5 zl{rofIFPE_MP90hwEW#I|7m@!CI@W@o^yHfbxtKs)OTyUG?mfieB4>OVQ<>8o3;PO z=wotYazUubCTm+J(+^|hRwv=kP(=_~m`?nz{qbTf=>pS)U2*9q;IK(AzeGP^DEXt( zyr@4X=iOc5Cv%;9n~06yB~%#SyXcv_%GQu`f-B*W?;s(ZrMLaYEN;_xWrt&AycfvL zo0pA?o1KYSLHAi)9XI+`b6eLM$YW@=YuP#vr@<~&2vZliHR3rKH*kEYe0M(aCNqFj zYvda9KMgV9=I`Zl_0w;S zqc#xP4T7+$F({sEs6r0W8I+Vore8K3YgJR2P)+gpu&@!xw`4#mhGAGGTl>^**75s^ zY!&(uBulWIVH3b21i~eZBISA`QmBxVE05=UwdIPS?N#R?e|~CdxeMbEFj6eD%JhFMOB8J%+oq=A5~Oma`=KE8)sjYGmwUgg(t0FJud|rCQR9`(0#hT$ zC{HqFsc!!8`_HPzA_37~Mg=io&5zh1Wi7yx>w(?F9O8AH_Lu~R=(Sr;=+AgWDaPy@ zMJ}^NC0XlzSSZ?G+`1iLU|q?0?)YuvxaVb!j)0nxdSPoQ$=p5AlkQIa_7ufA^f}C=!T}7T9U!zLDH?v^Ab@J>F z0)9w-BC0$e|A!0!(CV{(wZ#%ucx2uHJtdPl%rWRtq$BY3sk*+`W%NPw9HDz97fSGL z|F$Tj5eaV>aDl|l9~lG3)d{}Q*xaE0ha9#w;ydD?{p@>AB>lSN$;-g?CEybBey{2a z?>ScF%geB?C~v@a^;Y)7k+<(OtVHzYLe#Dx2=S-Uw`8637&Q!gPO>f?b%#~I_P9v5 zJTMCt?0A;i|6QI%=r!)S>z!tzP`~)&o3$ZI+TYt+kKI9!%Lz+Kq82$kAp$yYD6L(5 zVK`F0pA{2_u|hMc#t$l{wVld9+hwX`%}N?ac2h6YdzOZHH$~6gjYP2dw|CGjGVj?1 zov+3UMqn~UvHJ|jOpipWaKFF^k$zN<5%}m9$Pg#isJO(*cQh@$HtHfA8_ybfY z>$PHc!p~FpH-d1`zI$uc+TsPm6*E@mp=`DioDs2n@NTT#S~I^#?^I24;PWqv6ZXD` zOxr^a(qluyW4OK*;%Q0aO&;GBJRI_^C2_yzF08+!AX--o?fa*rci7~=!0&KWGWJhp zUxYDiSfKm&A5CJ6jiU}T8~>rR;~H*?Hk?Kp7OT{-5>4-Do8>v1=1#6b2kNPtc z7Z!i*L{dZgbg>`VX46F_cO@R|t^~4NpexaIF4w~gW7KenwbDho7j_P6InRzy#h+s? 
z3@aq5>%GbM*EmlJ4D`a8x2n!8v2jLyL*O55?uQzZv+EwHF1Zk1bSqN<#_F$GLUi@~&@ED;z@S?ZR9Zf(ci>-%&ES}~rMJBbI7?~8rCy1h0 z)OgFAe-`GIve9I74OIDBw_UbrAxgNE$|V}(v6DMP$Ce6BlKE18qz()`WVDpMdh5bY z%s8I1lfKL}YCC%w-eqzlCnpR{zu?Ojm!@2(jeyp_Y!8#2RBx z_|T^`#Z0r+z`Vr5>myQ-XeMILYB%g9Rl<@{$X7@scd*odKTRBN@+#E0goGOlKLoX_ z0H<17wc7H}_7gqk1Nz}@){P;czV_)!(D?5M|E_5@J%dhPo}|pGd-;4apo+}cffb#0 zPL(pBg5!q`9uee|m)TU{7FRWB$&mj9?_ty>m4)O{haZxK1YJm#Lat4yX4{A1vGK}t zX7OB6k9~1ROF?&O3C#fH#dH0qUuDYJPw)97&oG~yJ5IwB=E}d`l1iZKTx^zy20p1) zRT}a$V7J_XUV`;%7#Dr0yYdv%{Kv?=sxIl+8g*TXLii0(v9F**9dVK$H0rg1kYhb zfqN{UM-w)#?ebRUs;bH=$U2_{`zBUkSW7*CA8?Kzn-)&RKr3uml=$I$N`yX|8s%d} z;iS}}wyvlIQ9o8%sgE3#-LAO8{tpkJ&g_IGP}+4VRO*E3gK#obqHNY^d?H?JuK%SX`l(@u`8(&SVr|kYADp`%chr0Axoes;J5Af_#p-`#xE}nD>Ij|7 z>%=jl4sNqoa>}_;r-urFG8ilDm!AR1Bsef<-0?b~Eez@&#BtY@gfgs??phNV7xWE| zW;GsS&XabQ*SLm@4g8n80f@hj%T$0sbA-;_9~9Po!nom+tHNy(-S`P4C$MJb5jc9K zk`)e;#tGUSxL%OIh=NlAeBJ~|142HszeZLMLZyYX9-GgXDpQVh9b)?GaUg^PXaEW% z-7(|pT3#;>VEOnhf<~t2+$-q7YAI2ootqw!22_0|ZvJ=)0vDQisB{kRowI?FqPbp& zwZ6oA5JW58^6YgR^a-a*f*?e}971A_uPm7~=u&ecgG0`fWFNvAC~E4FYjl}_kV)mM z#Tetdo17)*NyQn1g{^A#Tw#U2;)_iIarb^#)*Fhs4BDKX`)wIP!n75-W=dX+q=EZ=Hgj(q$s1O&yBVs-|LC3x_F~3ns{M(pPA!==-zct^#6Cp(!@> z>~L;thLm``p>p-IoPoBHqss&!JH&~%{cTg|l7 zhP2|z+{>SoF|AvF2y)NB^aV*tXDf#~ecD~O!hol?l;yL>yD+~c*|m4I7-l`y zA||->gXA_-U*$HQ3;~^_)&JVs+_yB-THlyI{Vrbu+|@cR1Nu}NzTaYlY=D@(o0DJp zT)cxWmIdqq@D1RytN%_EoX#wuqqEH!x@ojRSY=7$;u~}r{rZGT&ewF=&G#u;IE>{S ztg*}VF^I7}5+#3VPFx@1vL}uSkbCgs>lP%gvF{w-iEGg5rsqj3*BRuUJEI83e6@7R zle7%~0kZw*01kQV?mfwxUz4-WD_LJBSr?3iJVp^*6@z^Hr@AeS^iaR%c{i>#YZhdh zn=}-#Bh9mAO?b(0uTP{s9zYm$d_#>Re*{a-DgHfi@k$lHQb%i0u;J^)o!#rzoVAjZ zPmq^e>1>F8tC&ty@u~p;8XRFSSzvqPNl1}9D0$?!EE?QP#s(%xQ*uk{$ zDE!<0ck89L95XA`LV*K9MLob6SLC;kr=`4(MTElAHLS$=R>K_N&>XN5Tfk3UQvl3i zU5DkOMkirG6mIGp6_RXK%t3c>yuQt>;Z9aGcK0`4-9ZD_fpe;Iv2ALuh#E3e8~KII z1|J&w=Fg&vRrPg@GX=z&KzB~&p5A_2#M)pf+P7u*RZ^+;VwF2_Kjj+mDYs_0fw_PL z-)hQCHlLb$W1A6wGwulg1kWdquEgLR6W8 z3Y~q(k<}5d+gU&2R*4+%JKwu2z*_)yyKETTOjSEio_vC)gYJ*qSYrjeJ(yoy z^Z{rU?)|WaJl}v1NVgMR#?}|+OZJ%E&3r%EyPIJQ(TNhKGfp)%>WG zJje{2dpH@yLzF?>^d52s305E1V{j_%=APX(ln4eMKf-(GLG1-hYVYW5;G{$TOIYHg z=Sg-DpFRB(rnS0O*{7w?59@qWs12@HFpiT66QK6PqSn!Jh+Lo}?YzDigDKXy^vMDo ziI{nQVz!ES{{RACnfsN9K^b1Hukhbf0a^v$3j|DO{Rn_f6z{21^rz5h{t8%n|I$(o1m`I=w3eR=?JRRvqM zg2W*QwADg2!?nefe8xm-9bN1@=J;g!fGKNJv} zm-V~yfd2I5#LX1GcO}Ssc0ZiNe%PpdNbY9lD-%d8YS{H&B*H_?2|cCdWpc{xpy>e3 z&R8;>Si-vadoJhTb;^LGQ^H;m<3#0RNi#235e1&$LJCaXt7jf&ku+<=Wncq2^il3| z-Y73Pz`1fG==s3k%cU(v6{%dlmG1d z#;l{Dgz#im#(Kn1;BVlUb+os}DxHES!kpN3QR=duSJj`{$-OtN={+CPmmP~VSv`x7 zq|f~PTacyIJ6>%=H`4t?OJKmmYPS0uD%g-|^mWq36<^-yc}<66_Xb8FF)tRM4#rl} ztWcGsLW(z$iS%Z%52hA;l5$8t1Nw2LEE0<1NF5|8;PxltNweOkmG3?d{;F$CI(}5Icfbtt`M*QS@t`Pv? 
z#13=`(0W}9hsS%b-5z(lck!CWIiAHZtpy*Q9S@l8myZH4{7a|-5Cpy0 z*yApP4oj(cGnZNAb5P`nC}_#s_u+TWTyp{{|Jca>c}0nV`UEvPSRxmN9dva}h%hjs zm^kpt@)9fOEm|UN6ggrCAX$GpL{0XK&*x*Le1`ZlO@eCUT6br^_1e9M`_@pSDhDG# zI=zvy7H4qgk>S^uuM?}7EWf;h-Y?h|67#t}!SQ~$73_w^7bhjqnf^*>)34uFCg-sQ z!eXuX8sslE%!WTJavoPfoJ>Bq{zRZG52AHzmezU@=k#k(^v zk*noh6~e*Yrh`_0)SAEVLizbMDdo{T%7%1Y%zGBcyZ) zFb@2{pN2?vM(EExyyJO=Jcbw0=|X!*74cBOq|F3x7C)u%bU%tM43TI&U6rk98aPYR zr3m$J@le5UaXX4yYx0G1JA?F6s`cLTo{9RJMfbAyxq#t$$c52ztE1uPCJ%??KXMAV zD?T??nLN34a>um=M#Up+w$r87ssI-QT2dOWfYRPhnuk<7qhct--`KCvn}YO9iFA(2 zHBuQ!6~W;fb=gC#IHlz**!tEt^imW8z1|l?`v6*j*N>H6AFWl{j*@G;&qAi-KQAIy zLEw(3h37|0NdHU4>%JJws~(>6^rLSd%v!!w29-(6-v!iyLL+(KX9-}9OI@skP`7&s z)Omjl|4m%!f<&8NLIDR-?&F8OJC-mpM%$5CU$sZO1xpPLXUzt%nILvCYxqdhj-E&; z=7h8JV>-e$V2efuQQ?`{$z`wVwvJMOjsTA*$&=}ptXc8f1m^P?x0hg77;1Aoi_6ts zL(mm8p(*J5bL>Wo-A;yp)!)!2Q1>p{>DRsHflJJ3 zmW@F?pf*XMV@}nId8rf=3C6|_nhynl9yfLezYV|P3cz)6a_V*8#|9QSVQUxMnvgxn zK8NgVYl^+Fu$Z`QJMLz-8Weju>3se3<|wG|4d!d-syKZ1fd{NTD~nP6+LJwU1tYbg z+mwZcz{BgGp;RmHwc0rTmdQP*8#!JG9(R4#(IW$R$o*V0!d`fIr^Ia4Q^nLT@lVHW zYa92SkkWAo5ujAZYs*2v2cRKqoH9@tCS2^T#Ggh_KLTyJmBCu{eoa=^pYW~l*)z%N zqCCQx!)Ko4Nx5u1an_X9Ll-hS;mxFpcxTtOG zoe9fUo;kMZ8V8eZiT9o!2^ptmM9DgV1~@Xw3(yo>OQqx3En{vxYJI>a`H39PLUP*c z13s`|`J#^`>nduEZq_%IiM~}3i%$>>NuSJmuCDQyMO3g7TEjK|I)~@G`3#vp@?13N z>RjW}dFuVdgt(R)@UP&efe)5rz<2--vXK_28E zU~GaDX@|;%cK>+Fmvpucgx9>Tmz1+`D&jtg-ZoIiDFQATae30uOU`APBKu6@gWF&uk;XCEn`}3 zO^fc|i%R%)tilMz%i@r?B9^~*Q(3yS!a?mroKap@Mz zN1@|Ou5Dq(CUY{TC*~CYWOhtcr`EDP43$FpD8sQDsLFy^qj2Wyy9R{Cogasmgn;Jc zAXdy5+ktb}$A@HZ^%+9i^gu4lT?zHS3KIohHeFZZItm` zNP{*7yFfd6R@pppu=d?3zO$aQ-7B^De*4(o8iQhi_=uUl`@i`{LHK%9fo)GS9JG!N znYg>&dx~Kv??KKYHF1hs2MOiM%GVEmHy@E1ytbqMFLe^XnHStsL7y=%s@?NM1EOTI z9E7!r6o0>28K3TroR5=6I>lqC=7Mn)P&T<`E6G9!F?GZFrb}W5{Wz^W^xI-oji8Y-uiZ(6*FNE2p4zv-|okEplxNiQ%bp;e9ykd;(Z`$Q2u32_r6l8Hs})=!Zm^xr^YvI??6l zN7@b=nY9&qxb!MmEh(uvHi3cDJ|}?Hn+{Wp%e1j-`T?3U!`EK>F!%5NX%jwWv(uub z^ZHCqi#CqhPBMslJHSjJ4xJp5L2Oj0iVJB3@AkEg+R6rMPjMEV_1G4PYD>sKeoT<% zH_rWck?F9&Na^b7X$s_dIdM~lnV)F#%gH#iq|0XO)#WUw4Xykj6>^3X&2t+v8{=uI zxUkk~b@~I^E5T8~c%}4Fs^`=Nv|+vpT!K7y%rmxJw=Td6hBgMSQx26_JPjC1XyglV zaQ?y+{i{~`_R+(qG18h6`iob_#^>P0UzqSm&Q9MRzGTbcBHd?nn|Yc)lKQSe0R!sW zf4&yrqIKPL1~x2WzI_(dNBB4#)IrJhsAFX9O3skQXU@$;<5avSa8(!I*NPWyd^%4q zevC&)7df7ejN@Dc)+x4SpT=J_ma@m(s(2*-(R_}98UiD!(2M6I&gE~oAVfb zXT)A7-yW~se{~o~;(AE6ioiM(a{pVLnFPY_mP{7Nz3c_!NsWP^v}ZWHGXpe>c(q#7 zJKr^?@H%m%DB3&n-hAPjeV4Ex0d#fFmbCV``_Z5iug$)F7UUAlm*BX_)&m@W3 z(ER1*t+pxAO2+zAA9c3m6w_2oS=DCuThfg2?Bo@y>1x%v(d^0RG0M-3|B7O?u3p%c zUi9J`!Hc@D55C;(#+Q)AcqWEB8t#gOeJa*W5HWC~(3 zC<#G;LB|^?aJ{h4h{H3>b&GlAM27d>*cuSgHfIJsLjGw7Aq!YLrXgJn0hf*w{$0a@ zq}mQ;bA#rcnQio?(6%l2wMUp?d$a>kw*Hk2kj8niYq%VZZ}#8tS_L>5dNQ0IZm?}))qRgoy(Qm< zRz)!-7{`Lrybw_~{0Da)>1}tkO&E^W{C&T>rgz-UN4KOJD4cA*Z+-bSu?Wy|YmKJ8 zQfu*W7|U~c{H}@5PwjI%gXipZ@>mg-vF-EjkF^Y8#t`fBh z9YJm_=Bu6SWsI*|RV}a;5?b!u_~!ms;OJ7 z1o`{0#LF-7O3`}HTwR;+FeB&?rY5(UN;UYi(kSMEi_M8}$j7ASw#6wYfAJ`9m%DZ! 
z>P*%v0m*^1_>(Oq3rSzOrwu_-T?;t?fc^nTQ_npJY8d;BYYrn0f|dAKu(J%VrQC|2 zHL4F9*UIm6jCu-@)(-Z9v;S46&r;8kp^t2Bh}YrD$rC!Ch)HaY8b4I4Mpfm+b)=sV zl-#hJNjt61bco&%)8nVBSZ37E8XtBn=}dJ-YDLOxhBwiZP_cH>a|}_{@VAJOF^?Lf zkaB-d*vt9_gY$f(X0?&->BhT`*hATENnAO1EAhTdP<812EDq)mdiNB+?zcH=2vhN$ zKq13S7rv5}dG`-A_TjaQrz2gGb6#b=6Ozym<3jEcntYHqyW~ueWJfTZVp)&YIp=Zu zF|)fbnV}y)Nlw9kEVA^%ws#$vXuSsuC~tW2fj=r4$NucekOD~GZ#pf*0KbkB=rHRh z+)UBvdd*{TYLDzXcw6^&JNfAF$;;uvV1=d5VF}GE?JDxl`=R$B>LljHQqQKZ%=B&I zUW<9-N7urOC4kF>VOa}pfP_~;wXTLfIxkV}`4wZi_DGG!Gwal^;4V42-EE+vLkEz|`Km)L0 zfnR421zg9MJjr3I*GPg@QYV#|6Mhw;Y(nn4zKs0GqEjO!vcrR-4|Dhp%~yl%BlcmK z5RLfXPfhc1ylRC5_H!%rP_r`4sX&VvITEeBTCe)A*hAZvH zA?zs$>lh~AO6d4SLq9W!DceIu57$`zFvp$HhcKUx;!B64nK!gHa<-CnD6nRN`C=FD zjuK-Q(E^QG--^gtm(z@@VH`+LQne75s*tLjQoq5wEWo)@)(S<}y`ZfjV$&2=QFy|m z4Rf#vr}^UD-cFa~iP3&JQD z8|hj~JtZZ6R*;h^jfxWKW66`zuNOxW-;=Wj1xjtZ=93EB&NagI7ksUZ&S0gb`e|vC#E?Nour~a(pW$Xv))WxGQjZeM)E$+}~ya{4QG!Q5knBV;jLI zdB4ucR^2?J=6UO-(l7*G^p0p|={R3u>m1cC!n^vQL3B=US-UYiGyd&?&Ierh{U zgq`v5keELF8urpqbp()sznr$B)$|5%7d?uz^XXjLSlzW>w9tp1Rg^Y$b~xgWU$A9G zt=+Dg`y8ncmUV*1$nrRgt=B9GmWY8GUSs6oSe5+wR}cL}FGil=fRsnHY}V^S&2!s( zzL%R=gl?kuBqjarkNi?6ee7mwSn~0~UsIV|gc!e43f^Jrv>;@JHMe$BR`4$f;*)AK zYq%aDvgwo3XAwirK?uy|2C7gtEH32UqVs{Cc;m=hVVo*Ku8#D4$xi*C)nOfq>8CW{Hnt%4Cibq zb4&e=^RqqGwb$Br>i0UAqbo9Qq2PoN|M-Axt&m_=6L1qUGIBK1#z>-7* zNMJ9uyZk}=;^7rM!~ahcT-}BYtyOE6g9;HJCdUQ7^yJg_6m zkMT;O33G|uziQRLb3MM%NA-D{a&R!hl1`}L#W}mWtAAD91HuC5!cW!N1EjS6ZkW|C1$kK(anll};PtK>UbGLgL`O z@#y%PQ*kdAMS|yt2p>taSusQ%R1N!-l`{pF$eGBdbLwTON+3o2Qjd43C*XYc1riHWk?km+bZ zs(>s`7cP8ZQ9m|9wGNBxvpAQPLe>HLhi5z_5xV8C?P9^y`c~moVj-x5{MHw&?Gpo2 z>}J2Q!y<1_&F(SG6U9ihtrrE@J|e;XPGJ7wE>vF)FAmxnN^>Avon)Z*XDi4d}w zda146*7DBBH3UdosECljyYGC;k^4kf4%xhqZJP*RVPfs>(w9`2`j*OC|B^R-3BZ4d{&zQP;zGF+#*ZZnzp4i5D9nntf`Ti95iTJKFTyGz2` zy>K6m1MuVG>B)7YCq1~p%nj1nn&NlWURAJ_Pl|kreRSK09DLtk7(68c2lbua?YKy_ zsEC^%pP&hMdc_NwJ`BT$RXMstVNJ7CiALSwU5riAW;U}pTLT8TNxGmmmJitP({gz{ z9;dIEOSNIfc)-2B_lvuo?l2X;P~6U0bKiVuxy{O2)T$P)GTnjRozT8G4sf91!%5i@ zwtrNdl}=_oN8u$Til*9Hx8G)>oy~TSh4*t8^t<;V%wRhO?e^hzIZEMw1AhQ!TpVHv z*kms~O(0q4ZIQvcZX&O7yOPY4t+zmAbZG%mGe+gFv3d6)r-Rbe-7zRE6!Tf-$`0nw_^9*^9POmP2Duu+6=t9rNn#74Kdme zISEoiR%{p+d^vUC%>H~$%{EdUi7pCy0D(O&(f~YYxIoQ;vYI(fD3zHZX#*oCc4H79 zA8Qf+m;RcBo~S{}YbDl5KSbW`fETl##CO30__JU}-x8`<_ZgK21u<-b8=7`j4=2g{ ztPmjaX%C59c|L-oFIwcDw>AGDkgd zify33MiQ?rtLMuM9T*w)PAvemmy*7p$_pao4QFGo71@+ZKFjuxh>WIhV@hQVA2jxT zl}c|m@aQ;}JPtz_FO`7@2L?mXd$Hd!kgar{E`YzX?_VJ3#C6b`26W?!4;f|N_+2R@ zWrFox2^Oj^A`Q$^lu5{a+;#Tk+fAprwEE930NZ{gV~czQZ#%b9kQ$H>J@RFu2=2Tv z6nR63%Rbi_+h3I_4`t^SThxIjhC4G!;>0e8=vI*Tk44YLpuY`Q5)ql|2^T?NBA(@}NCDIWTlC zX6OR-n>(WUmRnHx;AeNZ|mXbQ?`#bt{=GyVZln3VDOP?t+3*NHe>9$)gE zX`dw-hHYe4)9lp=-s{Bb>e2w=T<&=!u-cm-_DVxtW@HcnSU{X0#f zOSxC-9D!M-PI(5b-_O;5BcdH(8!TOuf5AU_QAD{LlA|W(mReqgrCe}_L%HmT3GOa~ zp$z_jjA_lROiYaQ$%T9TRU z^_tq3mM`D;2A+suNn^Z4Om8qc>ulYYH|)W0zd6MMA{#uUuYIGHchwaGn~iEmVHH&_ z()x~Rz%z4?v{C>|7Jpx#$b?nPvPU6cj$)FtcK55W!YYkl(flu6gD& z-NtTkR53sO$@cPpq$2OivsnLp~NmB|N7%PP7O#X!9&4kXit zWOX*CH$QT47~sk$@L;6Vm3MC(Y@9BsPx@Ms`zCwKiqqq1R(zSpBW)GrRZ`#7E=f5+ z>eH7ZI3@a>$UB}?LrZ!!5>07_izSYVW(3MsE-Ax^neFsF5?1PV*4)^z@a1-I)2~{Lv;} zlb{0o15ITZI17!5Cmo9o5$7ZK`O<3R&Zql<3QQq1aQi zX54C1h8cYiulg_pq3SVAX3xQl4z$TQdGnvN?`gjGdJ?J}sQ2;c=WdXZ;8Q*>enM)HZ#?<}eI55;rERxA`b_r!h&|f#HNbZ|y z15B2N^k-0T_zM>Q&u3E}MJ=dU(q~YdVeSek-odMPY=Or~0RL@P(HAc{o9 zY&A@>=`x7(XO40INq10v{u}yDcHwk~AS_pv6AY3WHH~z=kd%K}7h>JWK9iiq;!%cd zN{;GR3PRvzGevI~Co~z^cF>MR#1x;ZvIt|fL7Y@+SfjAXVy<4s4`~Gxntq~>X7ObT z0c-_#u|ld$AiriC7;<` z2OI?N{8Y51f1Lt31DMXI)&Qx@drqXMB+1VAn5s{caYR@K{q5*@&@_bCC+7qn&NvSu 
z(MxoYDT{Wu>UFYR_T^Il`zq#%=CX~@X)Mh5{*MR$=mY-x<8Xrfww-a3)4nCl<+7br zsr$TdcqZMXKQpG<>3leHpa8LP@3r*Na#{6a{ZtFaNF{h*N##&xo8Ueo+ih9 zge|TQH+Lquz(FI`7^>kYmZ>4l!3Ps#r z6L_xX`3Q3+bh{Xb2O)HlaND67q3Xjoo4CUf3%C^G_Lp%1g-xi{OVQQHgur+_XeV{Y z5v5>O0I54P-&ds$srNDLVMl>hC^nYK)wCS30S-$+0X-1;_@99KC$7?2{J{WuF1GZQ z|8d{Hz4+(TBCNMGO>su1YWw?&-Dj)qK<o)nx7xgmDkx3{EYbeyiT|C^)dYtDrGij zlo4unSM3abw=eg$-`P$Q@6*)}24*fAp^RA*>x@O?wviiR#=GEUyB|BlY}02R1}M z=eDcwA`2&%q1Z#eho*ddd;FG-u$ypnI;rBmXeS~%?>r}5WNZ=yk0%4=GwvsSVrDY! zyM$L76`@xYl!zXGH}QzsNBy48)V01bZ6gPY_@7Z*q2>MuzyI595t#lI5iZ6J)&D1e z{_Fbv3I@v=>NO-u0ZDFXuah2BrI)BFk`|q7hZ}ptBs~?!~R^H|>sY-gA3zQrh8fGmat^0&Yt~*M3peSixKF#QP9ZqRsUEes^*|iqc}5cfwQTd` zis0V*jzauvt|a2EI=QJl{d^*P1owC*&w=ycV@n04bIowg>`#9jJ_;mt^zb)j=BL8{ z8xlqSghZ*P`MiG!iPVsMF#e-#l`8QPH19Il#>pa*pPlk3Inm|DbSN^q*3JUM&C+Dx zTVzCJhlH{AZUWMIhN&`^sE`jLTSqP9CDMZk^fp3x(DmdZtb3LGO7XeCeLl$?4`WxS zfUt%SS0Eo00|7N)Sv>xnPej<7(~hU0@l!PpxRX=zEb3QqV)4A;PUvS&w?#mWR?v18 z@?v&<&exE3G3j&_)G^doBY^i_~1qZI4@EPUU#gb7i-s9 zCF8@TmM^ZW*NcRoafs+}7~a|m&CF=nh6g9@V-OBdNuZpG@R;uS)UNrc-GQ6BVph?6 z1H!jvNFfCyzw~XcWLr)L00!#l9#(2N6Rn*I?+KW zgDQ+e$-4=`8o-hU^I#rM%=TmGsARQl+zM6TZoh!d;?zrlf6xx7Wm1y`;-N4~nW15* zNg=ylP1{>g1J_BcQ~TZ^dxY~p)Qs@E7!S>ul1o6JaF9#pkrD^0BknMDa3qZGko!*~ z&LChA5^N4Vkfl+GaIx=Jq&TLFUSWaQHE*PFmVI96JuXbNC{#qvsxvCy#Nx=-X_SQ8 zTAixlmoR?w*5&X0LeAN^ke9pInEe6i9JkH>_jUBO?t8W$bq563X+;azj_TK~qMhu$ z;)XYX4bYB1R3=HlUDuwQ8)%SlSUE6scxqyv;mgd*&R<{fi8X;CJkOY_g>rqZeZqwo zh8PP41+7s&(O2usM`qQgaN>&&rx`_GB=zsRa~u(Xn^{W+r=hT#LJvUDCUUOo`&}Sc;~;^;7?Y$7uyU+&N}14M%U9b^caDqq$*9!1kDlIP{KH; z=_!&@?2_Al+*V}S6Ri2h%w5=T4^mFS7;>Wq_10a#;A) zirMn?*)aq(GiVje+z04_JsL2?-BBSqQ9G#%A5bE^AeqF=`ji_lA1acOG4y=`M>#%* zpx!&j(6M=v65iZroaiF@SI2xf9ShCmql_w<>xU zhvl1AYxP?GnofI`Uc#g#!%G6n0|#Zg#lqrXl>+|vA9z;E*YvD&YADs6R4}&v)xvPI z&UK^IyRCXSNdWd3!x31m+r#+#2_)O0OZnq)z059?`n0#{@w}ArYH}mHmReY)OjvJ{ zat(t+!npKP8u_vrO4-)Mc2OT!w33X+sO+FmSXlau$mgA}DS>;t1sGf2wG_FDD zj-3sH@A&QP%|T}IU@DrUraix(ubAi4Hmf19s-v@F?fsI)khRP^6o%)0`{Zm1OUISx zNQW86Qn4z-M=sEQAZzh%*I4gdG9TYq?F7<(q2NYnIrIy?;HZm5kyoSob$04P;zf+U z=%KX6M8o8xqaD`ot!v3egCTq@WJ z5I>NfHW{??Lc2A(-5#MO&sDmPBa`fL4_+3$7} zkfMWMXH)K48edy@uA*d`=xa#KS?Nv7ncrb9o$Y|U1SO5UMY-_-N32MZnNo4o(I1=? 
z0Wq9FC6~rs+gFbc9~wi$FUQtW6${?LUx3YLM(*1%Nb_KY0Y#pJz`N zhKJC8$A{cMwttTwSe_@>J?z&ovKVSR8x7`C)QQOIpMhCO8#KUt)wTSiclx(A)5Jh2 zb*%o`^6Gzr%YPYxF}6P~F=0HZMEqfLwD`)$k<4tjbO`}Y{vspvO)}&+(X+tBy{vKr zO^-aeENwn#5!u#3p=_t=yTszGxZPSr2>!jQ4kK-Dk3EO_(y@==VP8A+-(wBB5(cm@ zhhZfKMaCkLRtq^80T$T9qoZa+jSM-npANq$oXmB?d-m zN)8aE_S`Rra!0i6g>{cDB->+JD6(XObX0Nl7XZ)|Ir`O@1GOIbUltN9g@MfdX7K14 zPYaJS&KQw9fXd`P6pSoOBLDa+mN65!+XPHrm3orL0jFBZEC=f09NAeDrkp}^)kE0F z(|eDCWV+MiWq)jnaZ@cNC?hP!5Epo%bS@c|rrsP6!#9iy4-FK*rdHeCSH^NrHUc;g zt5ebteCh0Xz)Pq5#(v{jg=?r>5~sjsr6;>Eyy4jWr!^yw@^qWi{5ALc$Q_8-khhL= z70DTII?RCwQ+}NmaU2anPemQ?xbK*T1^3SuL+a}bsv}BkW0;$y8*rxe>N_Y!R;o1r zbErXm+<)9y#j^QSm`$WMY*;WMc^q(oJgE4FPmg9z5k^g3w*9CfUayl)*F%m`8E5 zp$q6rD$GB4vGqKnZ@F$2zC17$T_)jD%UdJFOJXaq-c*y)7vt@Y3w?@y_e*w!q^}yB zHRr|ML!i!;_Z>4{V;=eCoDB}SyA{g*^yP($eaZL6kTKUJGA@L|NIgOs$nAP&qx(FN zA6z^U64*V@zF~vRf9KVQbIyiiXi<%BPxG{^j81~<1x;6VIS}?$N{;35&X51dkmuaF z^GZR8x!H6q}fioqcl(`f7E|1ovuA5-@qEoi9xqp5vJ|Hwn6@Rei|T21GUQ%miL3yBgIRadwOQyhXj z8WEM?R)^RYj(+tSd<@majOWIz` zXb)@V>Py+N8@5rQ>uUz*-F_+a1eHxWBV3*VkRpui+0`585XmBE^LQx&N7}C;0{%?c zo{m62h;;Yvu#*Yc6=J!l(^o(UQ_bs?GpcY>r!NSmse<1jBQ5JO}Z$#20K9fYvUdyLB5mDuPfWvvEN$3YSMd>Tn7YOhC zV0Sb-EUi8Vnn^>M!a2w|dV~nrvZTx%2vDJq&Klyth94A_F$ffoXp!mhkIE{_V!`@y zQoRV9<`(!)o8H9lewCuMLBbxk%nWd1-CGNkXOs*u^kYp-Oz95f*&*8thy#yR3RKU; zw2Os!D|WVvy^`gEe;zjETbdmV2(p^Bxr!yBv5F%c$q;;c#t6_h*IfpTEjB7=Dil)R zVyj9{!)Sc4a0pc{{wdh)b9F1|YogEXRfMg-^5()wr8|rcyX|^%hD=GVLIe)(x_<4p zno?qf5rPv#E^bykb#c@xX%7}+kQ^C84dC_ua_qR3+`S&&(npEM!F%COT^=wnQc8@s zeM(g|Fs3)h;v?5pOW0(JpRD2Y_oEwu`^U&7Ynl;%S}cE$+kgE%5JP^;aq!~4u1AlR zcNhpAra1T^mR5*uSWA;2gFhY!XUwv(JI`liE#^<>MjaCC_^v+l_MQr z43o|`*d{C#V|k0fN9!dK7)xVwf5ag~$!A@JWv9qFY;fbZrvx9Rmmy>C^w?t(C?)M__iTFMH`WJkJ$igzr`Y7o`P!$6N(q?_{4qt6UeiqE?3&@Z7P4v};+ z_kk89Rg2Ms%rAWWTruu5q#B}x8!J-<-Vdf3vRG9^gx9=k@pbNSbU~nJtAKH$GEZD^W5zZfwkpMoG#N|oIiYGn$rwB_pw#7b zaq0YZeuhAGgB`^dj&s#^fN9}^&j?z$t6#+HJEJL(DX3_HhZD!LNmh<)nA10vfp)Nq z2;m-33V7o!!A&1lTK7~#AMG}ytSJ%wd)dfH_Q%dR_D=M5|3F0wZFZnGZ|Buzp`3)F z9gwj2_pyTN$270WbX=NV`)iZwk&MsG-fT3syy9SIa7l_EMtFE?d|Q> zpL2*q22Rpka28}f$y4?jYyQ?&4@NKh{bV-!#=FbJ+NfEci^wdG=Ae!YYdlHpSyBj~ z`N+h(!W6qxgFPvo_nV1-BAwfaD_R+OSwU~|ws_5RdFGRfVK&NPcG%;Yp@$3U9RUX! 
z-f&7gd2x2Zc;k!p?NgybIlkNVI&_Z3N{u?@js+=Z;g=l*f;C@kU(^Kjn&ak7C3o2X zrJQ(3xfrgYjw-Qop`G0xZS;_fD68iq{Z`-aIj(A&1RlcTDq^|5!7>jmzyD88l=Pn2eAn?V=1A~QQ1~#QDKxL1w0;J^wH(8Tt*MV z8i1lW--6L=H>Bf9PBljx$_6?j#2cB>G>zxO1}w9Au5o(sUVePy_YV%w@FHBczTy%GsBbK;?cuQ#n4dbfxuX6lq#RBvt^YUpXwU#1KyZxlMnWUpVVQq>`O4zV+9<9eV6G>KsgZ-j zn#@a9{r_a6BD(wWCM?9O)Jn&}(im~3rQ(PTzreyK$nekzGT;uRJ&0?nkRL9&_)|8L zj1rA{*7;FSOS(sM3d;B#u{Isf&K-zrI>rsKA&oYHv`C1jDruIGtyMo#J4p}oTIe_Y z7I(xE0#I+1o{+b<)PL48NGf5=|6DOrbVzUl6m{M%f=vkq!Hh8SrwiKCx0PUN50WH( z1|KIzoE4VcWu}^(kN6yH=8NFjVNeUy>cRoa5YS;JwznRdhHPgmW};gw477KCD?kQj zuh(a8EW|*bup+_?N%CCzl*m1-vj&MlVO!MMjWHQgrMzT6bktaTwmth(74`grpAK$2 zU&Ci7OwVB_Vd!rv3hikJxKkT(1jk=_aqQJx&EIpYK51>PzP@2ur=55jufvZAk1N+e zP5*LO>#B%&GHoI!RG<8U)&Tr_<}-o+Q?Y2iRnGlm#c~Sa|7kR(VdJ^@-TArgeePhT z*rpu@G(t(Tx7UZ7AeAQMfb^LSi~5LFio>}>uEB2c?DN6sXE*H`h zLs4TRJ1IB*M{{Fw3Kct=O>bY#?Z%rrf%6 z#7K~$OY`W3xnsU5CTFzlhfG9cfXI@>ioawS$&IKocOd){&zeaK(%{>=-;YUa?;h}9 z+qM>EQzjfvIA=XT9K|7j>_W4UGMQO ztZ%zWpt1xSaa+bOPJF*3P3c0+QPF(~o80|gwhn;I_PT3u*)Xgk{+`^}a57u0X`%!|B{%|FXR`gTJ-?}wIRn8HQ}!fIsfIo>;59C5cwBA{~go(x5tMB z23ni&#dgDIhJp)}+UIOQ1=-oY=7pv2!<=n{YE%23ESOB*CQ*Xi7&i@Sf4dZhHG)8f zWQjdE?FylIycBSlh5C2%O^D6$xIQ_k%cP9$9P2yt6|3)5Wt;{aa-~J|!B5VDSjrxce;1`I+(W_YI7QIVvg86=3iYx23sC%*7qw%zv>FDF;n zHlRoI2XCq@0U!yzUJ4uG@QhqRFvn$U{q>MV)9+lt`gYm*>~wChp=sSKpZDfB`dJnM zcka~HsHSYA%C$}8j>=rQYti8aE>Xs*S-PKsz)@so2&}`J` z%g^u{aN^fG``IsU{~NnwQvH!3+CMdXQ~4XI`(H|(Q5=R&x3k{+v=!d@both%?R9-{ zC8gSYAKAt;x8va$kGBQw=q;>IY1(LnP!fxxB(_AclNMcb?5BD60P?WWo#FMz!L-dh zV)H~%$NpO5v!~JdfNy(t!~%;f{$i_+O2KBQBUr*`m_cTU2}yTfJ*WF|qh=*_xqFV5 zy>^}ABE9&s1At6lw11+r-dHE#w6-5MhQ?_a&$bO|G1h#_loGiWxQd{{WCGmam_?ZCljU<^%79Bh?qK0=#vh~@=LA5V1 zGlzLNB@IA)ww_IxWXh}mFt%D%g^scqQfrl^;f!2 zBmbT-ry2sVi^qoYap(=;KEf%drc?}0YAx=vM+q9gH>-grYo;8 zJY`hlUPsB#EBnQ|>!XkdTA5+?e3lYw=Fh_9=E1UH@+7!`5nG!3ewS)7T}`58SykIj z`6N)Iods{ z=JtiHt^L!GTp9t8vl5qQx?k{o#V9a$evYvqi~ERAApJI&+cF7pG#SMVK`iv_+Qbg%8me0`Y5Gli~|Y#t-BC0 z_uUtb+=SVXZ*roGCmbu0~7?7^s30`Ho+=>1a zZ2{ovQn}AG9?Y7rY~Orobh@8tbiY$5Bh zjwt1bD6QjR7Gfn>7Ci_IjoRF|d4*4uV-W|e<%w5m%}?mMinW0a247E@fvEaAZLh6# zb?D;xzqDch4sG54pGqRo!V%Emi!Ddl)k7iCtCQ!=FxEwO24Wld#p#52DsR@9ed)m&d0_JWr+g0yhy+{1N-kI9F;Cao?Xe%`~ zzTr2N@m}pZo&}}o^sK_E&Azl3(VX9p>kG>}qEI}pO27Gc5&>zrDj(gy9^W_ef&E@F z^y=ec87F$~6|<6 zrZRDK_CoX^^2~`3;Jq)^nY<|kfe2Ur2i9xF&y{4Hm`k+YCwQdGulnu(l(auF3 z?LFNY;1gVDeFL0jppAZkz8w8{tj*qme6YHrRIjG0RM2j}W!~#ZI8j`8VDWaPVjKh0 z>8t$X^zJyTc0#y}!xH^Q_-=#6j;3ggl=?#j^YnAhB-=^``O&0trX+AQZRqO7;(PFt zT7E%IS=B2?JI7KOW(1(8hOG>lMQ3(Xrlzr&XQ#?AgZ2SK_DZf@+yKPH=LO?yLb)%e z-}%CZHmk9{b%EV_-COfj{K|O7q%wRZ+hS>k(OX4WR`FYu*nEOew@7^IlsavFZ=iP_ zNch+7n$ZEN!{e-@m&-+_x_#)zj7QNFgN@Q(xXTFIY8ybS5i5DGd2+sBUD2x=m$!Z6 zpdEtdPY?EqM-n4s_U0OqCa5ZIt`ynv&PoB*KWjkK&g<4CZLf;jzvW;UUF>U3AFVgoibc=YTWu&gw8~l}9khY|p*%HxV4`_@4dL4z}^r#{wMd zq8@Y;7lVW`<^|~#2J^PdK%CCrunena_x&K%CK?&ndc&202-PNZReI}5{&z)Ba6HpH zx+fkgrb-AXw=N`%6L0y2;kVIi54aBHXt`3yGt1BBp2rpci-!eW5JvbvsYd2%1b)Kq|;;4@ImakjW=$uJWcjnWkklu#Y6S4 zcI1e+MV1Vi;kWcAWQp>p4=gqORxUW5aTl^w13h>=i|em8V?L(x z%l^^yjp9g2XIpmR(WLde{!oH=Tdsp^F%4&K)5GDPZVz;VCZ01v?MW3Lg@Ft$F+j3= zl`P9>@}Xn;i^udBFliaT&frm@#I?TAjXr^WobIX!&nn&Bd35D$u7c6%p2_;x0CK~O zoW_!l$JO55kRbs1$3*JF$L_^RW}BEQ|Eb%!@%wndr{BD9-Xylot%ldFQk|Kt$N#bQ zzuWJi!nEX_b38QPGVSjH?7!8}Oi(us72yhKnaaZ9;?w@hSxo1EEX#e9=D8R4@Psz6 zZIPB5d0jn?KPL-af7o9BWyJ%g-PaEcp- zL4nczY7&W3b8@%_jZc=q5|im{+1X{{faqZS@>6?84F7jUhQ-?FbNMX(>90U12IgZ1v{`*SG?szadRKltf0R#rQ+vrkyNSD-bOdPTP6R+mqV9OWvUUqKpU0An!oTB zaVKhLk+lvXBgy6PR>lp0zMXPry|Q5)!xyE^B3@y3!mzmbEyv5r^K5QmSvZDo&IPoD zMr5r{Y(cQEHTn02X|pa_r?ccC2E6iET?ewej$9TJ(#Uo32||s$%KvS`G1C5ZgRYz^ 
z{*m+5vq0r|Tg+c^^#w|Sh=h&~kkUT{wDV(qA5_flwMeivDjseWyaZY_K=a+!K&YteM4f1$C#SURPCPr3%y1dDCZt}cS@m)*?R?~Q|^ zG=g76sgj&sKEr@?&ek(XBHhWpyeO8@mz%V`D~xp4YR-ci(?(KQlf|I%CQBGS^eEV(rSi-05`U?IAmZ$>Uyh5E!>%@CND~&CftYCrUd8 zz#oj>7|hpxP?y}Jni?+7G{bkIWT&q>H5v*4hf=Z|*vGUuy!M;j0qI$S53QM)}jAN zM3}LD#cJ8$)tu!qP<-~?R8i|u@v-9#{cW?z5klpwl7fSa8yL3N|7c-q+_Vk z5vc+qy@L=49YiG5fLN#jL}~~S1p(=X-cd@Z0qMQ>-XRbOFW!6ZIY+;|_Xj+G%4hA! z-g~V%#~gFcu~vpmy|sLIvzJE01?sOnn&y>=5$Rr4La>V#heV|yJ1fj~fu}BKCvP?k z{5Pv9YIBYPzY!O@>^UZUHF~ERJHtWe9EJ}rJ29Y}&-yxl{I`#G#*)ZMN{zByMYDWAyP9#5t*-n^BeWQyC0$(qY8wM(jwiHf%0p6=S z_Qy`Zk@ky$;63M~aszYUKX@c^CSGbd$BI)FY|vZ0*VUyj?Tt~XYX4`D6wmi!@ zY@c>--uj2RjSpGHQM7?&XIPnKXid? zO=#%7?i(+iym~n3UA<-&Y2#Bmz}ZZx+H+axul2fxKC`G~8)r$2p11cpKi_nLEcUSapWo)7bj(P_cnxVc_7?^WAI)hDisZck#4wHBqa z^U8tBp(k8Z&F(^%7aDLX9}h{m$N!80{%};wzNEM}9LY~Q*@ot4^A2m3yROp(F5(b( zUStS0;q8NTbq#SxBRc0BNfjoppfThDYyD4mqmkWP(GuMP1Re1LftQs_m(0D>r4OXU zD0rRe`76Gryc{n&Y#5H-6u58!%GI`pNRktmoK0f@K1Rp|nQ{kW{8qKtxwM2cp z4|O}B(=dFy81GNJs)FN}ay5i~rHq1xS`AaLBfKIDZ5^Bk+HA|h`DKtBlgCedF+KOj z2J^?B+bfJ8IkLF~)!f*vvefyrY2Mc2NEsk6doVA;z9{kg7BZp#Z^0h!{IKhDTLD}3 zmQiD_{H<#rzgu236CHx3t64KWNXHpJWyIX21dwm6(zOVyA1&_Lsv5RiiC zRy`;{OR&{haV5tkt6}W(?9?7A1PqM3^8-xYg&Lx#p#9Wtck{kJ^m`(c%oJGk<{Ial zp!$8$dz5I?8vQ4T1Upbw&35llyN+6Qd$U?XduYs3j(r`3?^?k3&HU0iVl@G27b}1W zeZJ<1dgl43A@cU+4Oau18vp9`B!tJqcBsvJqdn+-q4kH#Hd69GQ#5@K>J4!}5Lp5=5%JccJtEJ~%6!!Q`?`ckp-cE$S+>aK0i=#tm| zxcl{MMTgkw!RSi`bdKox21raB<%cZMu?VRabLGii8LpTZ^g_&=3Zu6$@=ylzTdyr@(%jfoL4Qb)t}4QpSNzk9szkQf4=h9y8K*$z|cFdo`P7-g-D71HTu7W%VL?P z?-qGg#$r}sB^g)O@IgM1EaMeFL06D@FX6jvVr31jxloP=M4-(vj;egV~dp?{f zb8{E6!NM&_)T12~!gv7w{K3chM)QxDoWYh7P!tWMD#om-^|)i?Y@ZibL*5eMC>6o1 zzp~Zn<1})NS($2j;1E}PhL?4DD?ztkP|AQW1x7R1eztgYljloq;faefTSunl>sLHt zE@Rz{G<{NMox}@j-RoRKg%*6uQ7K%GYwdC|sXkxxmh?1oY>`#eAz~RC<~A`0H94vW zy!rvHrvmWh(n&KQK=H$7~DFh(~%Dh?=htT2%9S+D8AV z)(#{(*s{7BwBoetrn(!qoPl}{NNf^3kWmEmOfrtd;`oE;7IjD6H1=@PPn9yM&??rV zl7cMVVd#m|F8DX~shok8miD8%b5o;a1h;Khs(@`bLXSYgaytGU~0fURI7b59uw>W;)M3 zfAY%ek7Mf0may-FPCAi>kCdXM&&Ds2*d9fxVlo2^O&ldgD(L#oHX4@nw3*O0JL|V=751COsKbJ_gEzvTK zvbd^ZP(T{{Ezfzsdvx(UNAUUZ82yOSd6@IQ@1wR>71xmupIe;b13t zC3~qoynBNOf4DBEa%76=%(pP?(wk$!f!_YruR~ibB|mC3nCl)`BO&}mG#*NqE=g~xDm>JMPb)Ui(zI~CQNLBOJXu^ZtOo=2*R3nF8i8rb z;bqu^v`!&oi6U6>RL&AWl|LyPX-YLGPfywX@^vzUW3F9hhYA}KZ92wxFm;m;Lm z`{Htxf>8T?AUL<<5b`2A0zEP+U|P45>*oPM`mIS$abdyT@q0@Gu`yW!n3Jfjrfsw3 zC}H|Pno0gayGw)|uP({Mh`idQSpPlx7aSr|zum#8c&kkKl$#y6tRYF}S#NGs3<+W! zDJ_^51uSx_1M&^r9f|2Dluw73k5Wb%`aCX$0h)G=vTGw*wxC-<9%q&;{vc9u64J2-H#R>7Mfs#*2^0 z(MjeVj}sBb9O@5w3{$CB3_YKK%!DhmWx7Gv}*o%IyKTsPh+I>JAH$p&BK( zsTVa8^_m4FShH>3!2p&WNj4tYbk>6D>P+3Ao$hn<>jUVI7o#=Yi|PHX`&#>-u|#IK z@Tf`w0ax8R?=%sStB2W3_`;X+&>5jt>}ANUC%Q>jNFW|~uM6Xj2@a6nfA~v4nkUBxB=4={dlv>I ziszPL&S_&)ROL4eh~u#v`i!H_e`xztxK`A!J4hD}O!DVL@!=l3W_-2H5d9=|!aR#) zA%4{W`AMGtV^|y&_YK^jcJcR$KuD|L*}!61&V0?7$cZ02Y2o)(pW%I9pXkRFG9B#1ELnrZ6^hu+9<%?mF#VfX(=F==%L* z275MAnx2Qmh0cV6!ueq_F zfR$WLTf~mZH5TKo2x^~)5=Ev|Ipvp`gO1|1&Zjd3#fr5e>)osTybgj$dj}&2EzqKU zMO`JOvni!h6QeDSi-k_?vS-K0-Rkpo$qhP=+M*(twYDY>7*}Sf^(IRac`;Qzr%`k; z$l+9Cr8{;!t5!kPQA#MaC$Co{wLwejhCo&pB8PpmO5kkDoNYnUnL6Io>UHX{ac2vm zXHhF!T>pw$WQm*v*K9aeJ@&r-d-SiM=0`y=s^&=^ypd7fNmb^X+7Jm{v3l z6IZ{uep{o9UVFpwwc^=p8cK>UDh$(f0No>Ax(wTyypfMvSCj%>x|6Uy^Iot0v~sdT`jk;iQjKlSY=3d}zseoq|-aD0d3uO-;5YPp;T>ZyizA5~q;L(LC!51Umw)0+< zB~w~7z+2fV(Ri#qJ7eSVnt>5fI>4a! 
zkw1Xow8ibxV9aNjh~3;qTc_6_;3}(D4GHF6RuMgTxmnexT)pwERf8fXx85%&Ch2bl z@(U+RkexNh9pLAlJS=XXeO~8?@6!BTfsE>--*=EoAThF z$OQJ||1i*>Psl_^K*MkDoCXW%k;79=b-%^?zXQKxDf!!OyTfr?chuN_5E=;;GANjM z$;$GC=sWZ_yOY+=x}bS@8E#UizT!2Tl)#~hw4IC zNo$R|Y6F^YXwT1Hr=hG_PUJXlhA64bs85!=P?o6l^%9DG3eP6V24}YjRO&d#JjyQ{ zcId?%+S)VI+1z&IzTl;yKQ|%;y`jJO?osm75$U#gO4qbPYPwDVGgBW<3%h0WdZk&2 z%b{lgd)MDBP|Xh5VDXw}Ytt&o1GVdded-q{Q$TEU$RGPA;P~F;zc&H=gR9GUs9mvt zdV%-~zf_oE!pK_R?}|baX)CRgTkp<6ycvIcsJgblR z(B0%=UuYBikh7Ri|MvgoGcix_Dc|S9OR2xglm4;{zaYXVDb>0+lb$UuFxPAOdt=mZ zwaj0FS%OY)huX;R_J-S@R5?E5?M!@*GL^zY&G0?o&7mH(Yc>=Tzw1eU&aMO=h$Lur z|Lt3Xv4&-tq zA=QOs&lV(YJe;MAz7K~K1V$^N-U*!Cw}(!V!fb2s=UFPGhd!1|PZJ$zsE^H%j@McY zI1fGA`7Nyf_r5;(7xlT_|Nru$o&-Y$Q``Z2q^`c1FoW|Q!^+~KQIvXM=X){K-TQ;g zMTun1Hwa*hhCHh?%Nc0hiVFl-Mc-UzU+H)ZQDatMya*%fn4iOrsN;79PEvciL_Fo_xo9Dt5&w*p z?|=LcntxrY?T|7xh7V~Svi|AGQl9wEKCe)_0NYGwFB4_^ zDMEJQn6%-dnZ-2xBwY>BLU%3-n6JLsDo!s~Z;AAD_IL_-4das`KYg#zKmH+Ye@&^^ z{LkEU-kgh+@c&%Hy8}nex3*#$GvPga3J`mKAR7=M$g{{Q+?m~;9w*ULL^ T%+Z(d|5O#9C=|#)d;R|a0zt57 literal 0 HcmV?d00001 diff --git a/toolbox/openpcdet/install_openpcdet.sh b/toolbox/openpcdet/install_openpcdet.sh new file mode 100644 index 000000000..53c0ac369 --- /dev/null +++ b/toolbox/openpcdet/install_openpcdet.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +TARGET_DIR=${TARGET_DIR:-} + +PYTHON_PATH=$(which python3) +PYTHON_DIST_PATH=${TARGET_DIR}/lib/python3/dist-packages + +PKG_DIR="build_pip" +PKG_NAME="pcdet" + +if [[ ! -d ${PKG_DIR} ]]; then + echo "ERROR: Package directory ${PKG_DIR} doesn't exist" + exit 1 +fi + +latest_pkg="$(ls -t ${PKG_DIR} | grep ${PKG_NAME} | head -1)" +if [[ "${latest_pkg}" == "" ]]; then + echo "ERROR: Cannot find latest ${PKG_NAME} package" + exit 1 +else + echo "INFO: Found latest package ${latest_pkg} in directory ${PKG_DIR}" +fi + +if [[ "${TARGET_DIR}" != "" ]]; then + ${PYTHON_PATH} -m pip install --upgrade --no-deps -t ${PYTHON_DIST_PATH} ${PKG_DIR}/${latest_pkg} || exit + echo "pcdet installed in ${PYTHON_DIST_PATH}; please add it to your PYTHONPATH." +else + ${PYTHON_PATH} -m pip uninstall ${PKG_NAME} -y + ${PYTHON_PATH} -m pip install --no-deps ${PKG_DIR}/${latest_pkg} || exit +fi + +# Return 0 status if all finished +exit 0 \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/__init__.py b/toolbox/openpcdet/pcdet/__init__.py new file mode 100644 index 000000000..9fdf7d2a8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/__init__.py @@ -0,0 +1,24 @@ +import subprocess +from pathlib import Path + +from .version import __version__ + +__all__ = [ + '__version__' +] + + +def get_git_commit_number(): + if not (Path(__file__).parent / '../.git').exists(): + return '0000000' + + cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE) + git_commit_number = cmd_out.stdout.decode('utf-8')[:7] + return git_commit_number + + +script_version = get_git_commit_number() + + +if script_version not in __version__: + __version__ = __version__ + '+py%s' % script_version diff --git a/toolbox/openpcdet/pcdet/config.py b/toolbox/openpcdet/pcdet/config.py new file mode 100644 index 000000000..02e5daf16 --- /dev/null +++ b/toolbox/openpcdet/pcdet/config.py @@ -0,0 +1,85 @@ +from pathlib import Path + +import yaml +from easydict import EasyDict + + +def log_config_to_file(cfg, pre='cfg', logger=None): + for key, val in cfg.items(): + if isinstance(cfg[key], EasyDict): + logger.info('----------- %s -----------' % (key)) + log_config_to_file(cfg[key], pre=pre + '.' 
+
+
+def cfg_from_yaml_file(cfg_file, config):
+    with open(cfg_file, 'r') as f:
+        try:
+            new_config = yaml.load(f, Loader=yaml.FullLoader)
+        except AttributeError:
+            new_config = yaml.safe_load(f)
+
+    merge_new_config(config=config, new_config=new_config)
+
+    return config
+
+
+cfg = EasyDict()
+cfg.ROOT_DIR = (Path(__file__).resolve().parent / '../').resolve()
+cfg.LOCAL_RANK = 0
diff --git a/toolbox/openpcdet/pcdet/datasets/__init__.py b/toolbox/openpcdet/pcdet/datasets/__init__.py
new file mode 100644
index 000000000..d103bdb6b
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/datasets/__init__.py
@@ -0,0 +1,82 @@
+import torch
+from functools import partial
+from torch.utils.data import DataLoader
+from torch.utils.data import DistributedSampler as _DistributedSampler
+
+from pcdet.utils import common_utils
+
+from .dataset import DatasetTemplate
+from .kitti.kitti_dataset import KittiDataset
+from .nuscenes.nuscenes_dataset import NuScenesDataset
+from .waymo.waymo_dataset import WaymoDataset
+from .pandaset.pandaset_dataset import PandasetDataset
+from .lyft.lyft_dataset import LyftDataset
+from .once.once_dataset import ONCEDataset
+# from .argo2.argo2_dataset import Argo2Dataset
+from .custom.custom_dataset import CustomDataset
+
+__all__ = {
+    'DatasetTemplate': DatasetTemplate,
+    'KittiDataset': KittiDataset,
+    'NuScenesDataset': NuScenesDataset,
+    'WaymoDataset': WaymoDataset,
+    'PandasetDataset': PandasetDataset,
+    'LyftDataset': LyftDataset,
+    'ONCEDataset': ONCEDataset,
+    'CustomDataset': CustomDataset
+}
+
+
+class DistributedSampler(_DistributedSampler):
+
+    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
+        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
+        self.shuffle = shuffle
+
+    def __iter__(self):
+        if self.shuffle:
+            g = torch.Generator()
+            g.manual_seed(self.epoch)
+            indices =
torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = torch.arange(len(self.dataset)).tolist() + + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + indices = indices[self.rank:self.total_size:self.num_replicas] + assert len(indices) == self.num_samples + + return iter(indices) + + +def build_dataloader(dataset_cfg, class_names, batch_size, dist, root_path=None, workers=4, seed=None, + logger=None, training=True, merge_all_iters_to_one_epoch=False, total_epochs=0): + + dataset = __all__[dataset_cfg.DATASET]( + dataset_cfg=dataset_cfg, + class_names=class_names, + root_path=root_path, + training=training, + logger=logger, + ) + + if merge_all_iters_to_one_epoch: + assert hasattr(dataset, 'merge_all_iters_to_one_epoch') + dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs) + + if dist: + if training: + sampler = torch.utils.data.distributed.DistributedSampler(dataset) + else: + rank, world_size = common_utils.get_dist_info() + sampler = DistributedSampler(dataset, world_size, rank, shuffle=False) + else: + sampler = None + dataloader = DataLoader( + dataset, batch_size=batch_size, pin_memory=True, num_workers=workers, + shuffle=(sampler is None) and training, collate_fn=dataset.collate_batch, + drop_last=False, sampler=sampler, timeout=0, worker_init_fn=partial(common_utils.worker_init_fn, seed=seed) + ) + + return dataset, dataloader, sampler diff --git a/toolbox/openpcdet/pcdet/datasets/argo2/__init__.py b/toolbox/openpcdet/pcdet/datasets/argo2/__init__.py new file mode 100644 index 000000000..139597f9c --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/argo2/__init__.py @@ -0,0 +1,2 @@ + + diff --git a/toolbox/openpcdet/pcdet/datasets/argo2/argo2_dataset.py b/toolbox/openpcdet/pcdet/datasets/argo2/argo2_dataset.py new file mode 100644 index 000000000..70e932e7f --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/argo2/argo2_dataset.py @@ -0,0 +1,537 @@ +import copy +import pickle +import argparse +import os +from os import path as osp +import torch +from av2.utils.io import read_feather +import numpy as np +import multiprocessing as mp +import pickle as pkl +from pathlib import Path +import pandas as pd + +from ..dataset import DatasetTemplate +from .argo2_utils.so3 import yaw_to_quat, quat_to_yaw +from .argo2_utils.constants import LABEL_ATTR + + +def process_single_segment(segment_path, split, info_list, ts2idx, output_dir, save_bin): + test_mode = 'test' in split + if not test_mode: + segment_anno = read_feather(Path(osp.join(segment_path, 'annotations.feather'))) + segname = segment_path.split('/')[-1] + + frame_path_list = os.listdir(osp.join(segment_path, 'sensors/lidar/')) + + for frame_name in frame_path_list: + ts = int(osp.basename(frame_name).split('.')[0]) + + if not test_mode: + frame_anno = segment_anno[segment_anno['timestamp_ns'] == ts] + else: + frame_anno = None + + frame_path = osp.join(segment_path, 'sensors/lidar/', frame_name) + frame_info = process_and_save_frame(frame_path, frame_anno, ts2idx, segname, output_dir, save_bin) + info_list.append(frame_info) + + +def process_and_save_frame(frame_path, frame_anno, ts2idx, segname, output_dir, save_bin): + frame_info = {} + frame_info['uuid'] = segname + '/' + frame_path.split('/')[-1].split('.')[0] + frame_info['sample_idx'] = ts2idx[frame_info['uuid']] + frame_info['image'] = dict() + frame_info['point_cloud'] = dict( + num_features=4, + velodyne_path=None, + ) + frame_info['calib'] = dict() # not need for lidar-only + 
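# The fields below mirror the KITTI-style info layout used by the other pcdet
# datasets; calib and pose stay empty placeholders because this converter
# targets lidar-only, single-frame training.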
frame_info['pose'] = dict() # not need for single frame + frame_info['annos'] = dict( + name=None, + truncated=None, + occluded=None, + alpha=None, + bbox=None, # not need for lidar-only + dimensions=None, + location=None, + rotation_y=None, + index=None, + group_ids=None, + camera_id=None, + difficulty=None, + num_points_in_gt=None, + ) + frame_info['sweeps'] = [] # not need for single frame + if frame_anno is not None: + frame_anno = frame_anno[frame_anno['num_interior_pts'] > 0] + cuboid_params = frame_anno.loc[:, list(LABEL_ATTR)].to_numpy() + cuboid_params = torch.from_numpy(cuboid_params) + yaw = quat_to_yaw(cuboid_params[:, -4:]) + xyz = cuboid_params[:, :3] + lwh = cuboid_params[:, [3, 4, 5]] + + cat = frame_anno['category'].to_numpy().tolist() + cat = [c.lower().capitalize() for c in cat] + cat = np.array(cat) + + num_obj = len(cat) + + annos = frame_info['annos'] + annos['name'] = cat + annos['truncated'] = np.zeros(num_obj, dtype=np.float64) + annos['occluded'] = np.zeros(num_obj, dtype=np.int64) + annos['alpha'] = -10 * np.ones(num_obj, dtype=np.float64) + annos['dimensions'] = lwh.numpy().astype(np.float64) + annos['location'] = xyz.numpy().astype(np.float64) + annos['rotation_y'] = yaw.numpy().astype(np.float64) + annos['index'] = np.arange(num_obj, dtype=np.int32) + annos['num_points_in_gt'] = frame_anno['num_interior_pts'].to_numpy().astype(np.int32) + # frame_info['group_ids'] = np.arange(num_obj, dtype=np.int32) + prefix2split = {'0': 'training', '1': 'training', '2': 'testing'} + sample_idx = frame_info['sample_idx'] + split = prefix2split[sample_idx[0]] + abs_save_path = osp.join(output_dir, split, 'velodyne', f'{sample_idx}.bin') + rel_save_path = osp.join(split, 'velodyne', f'{sample_idx}.bin') + frame_info['point_cloud']['velodyne_path'] = rel_save_path + if save_bin: + save_point_cloud(frame_path, abs_save_path) + return frame_info + + +def save_point_cloud(frame_path, save_path): + lidar = read_feather(Path(frame_path)) + lidar = lidar.loc[:, ['x', 'y', 'z', 'intensity']].to_numpy().astype(np.float32) + lidar.tofile(save_path) + + +def prepare(root): + ts2idx = {} + ts_list = [] + bin_idx_list = [] + seg_path_list = [] + seg_split_list = [] + assert root.split('/')[-1] == 'sensor' + # include test if you need it + splits = ['train', 'val'] # , 'test'] + num_train_samples = 0 + num_val_samples = 0 + num_test_samples = 0 + + # 0 for training, 1 for validation and 2 for testing. 
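# e.g. split prefix 0, segment 12, frame 7 -> bin_idx '0012007' (built with
# zfill(3) below).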
+ prefixes = [0, 1, ] # 2] + + for i in range(len(splits)): + split = splits[i] + prefix = prefixes[i] + split_root = osp.join(root, split) + seg_file_list = os.listdir(split_root) + print(f'num of {split} segments:', len(seg_file_list)) + for seg_idx, seg_name in enumerate(seg_file_list): + seg_path = osp.join(split_root, seg_name) + seg_path_list.append(seg_path) + seg_split_list.append(split) + assert seg_idx < 1000 + frame_path_list = os.listdir(osp.join(seg_path, 'sensors/lidar/')) + for frame_idx, frame_path in enumerate(frame_path_list): + assert frame_idx < 1000 + bin_idx = str(prefix) + str(seg_idx).zfill(3) + str(frame_idx).zfill(3) + ts = frame_path.split('/')[-1].split('.')[0] + ts = seg_name + '/' + ts # ts is not unique, so add seg_name + ts2idx[ts] = bin_idx + ts_list.append(ts) + bin_idx_list.append(bin_idx) + if split == 'train': + num_train_samples = len(ts_list) + elif split == 'val': + num_val_samples = len(ts_list) - num_train_samples + else: + num_test_samples = len(ts_list) - num_train_samples - num_val_samples + # print three num samples + print('num of train samples:', num_train_samples) + print('num of val samples:', num_val_samples) + print('num of test samples:', num_test_samples) + + assert len(ts_list) == len(set(ts_list)) + assert len(bin_idx_list) == len(set(bin_idx_list)) + return ts2idx, seg_path_list, seg_split_list + +def create_argo2_infos(seg_path_list, seg_split_list, info_list, ts2idx, output_dir, save_bin, token, num_process): + for seg_i, seg_path in enumerate(seg_path_list): + if seg_i % num_process != token: + continue + print(f'processing segment: {seg_i}/{len(seg_path_list)}') + split = seg_split_list[seg_i] + process_single_segment(seg_path, split, info_list, ts2idx, output_dir, save_bin) + + +class Argo2Dataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.split = self.dataset_cfg.DATA_SPLIT[self.mode] + self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing') + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None + + self.argo2_infos = [] + self.include_argo2_data(self.mode) + self.evaluate_range = dataset_cfg.get("EVALUATE_RANGE", 200.0) + + def include_argo2_data(self, mode): + if self.logger is not None: + self.logger.info('Loading Argoverse2 dataset') + argo2_infos = [] + + for info_path in self.dataset_cfg.INFO_PATH[mode]: + info_path = self.root_path / info_path + if not info_path.exists(): + continue + with open(info_path, 'rb') as f: + infos = pickle.load(f) + argo2_infos.extend(infos) + + self.argo2_infos.extend(argo2_infos) + + if self.logger is not None: + self.logger.info('Total samples for Argo2 dataset: %d' % (len(argo2_infos))) + + def set_split(self, split): + super().__init__( + dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger + ) + self.split = split + self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing') + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None 
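# --- Illustrative sketch (annotation, not part of this patch) ----------------
# Decoding the 7-character sample_idx built in prepare() above:
# split prefix (1 digit) + segment index (zfill(3)) + frame index (zfill(3)).
def decode_sample_idx(sample_idx):
    split = {'0': 'train', '1': 'val', '2': 'test'}[sample_idx[0]]
    return split, int(sample_idx[1:4]), int(sample_idx[4:7])

assert decode_sample_idx('0012007') == ('train', 12, 7)
# ------------------------------------------------------------------------------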
+ + def get_lidar(self, idx): + lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx) + assert lidar_file.exists() + return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4) + + @staticmethod + def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None): + """ + Args: + batch_dict: + frame_id: + pred_dicts: list of pred_dicts + pred_boxes: (N, 7), Tensor + pred_scores: (N), Tensor + pred_labels: (N), Tensor + class_names: + output_path: + + Returns: + + """ + def get_template_prediction(num_samples): + ret_dict = { + 'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples), + 'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples), + 'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]), + 'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples), + 'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7]) + } + return ret_dict + + def generate_single_sample_dict(batch_index, box_dict): + pred_scores = box_dict['pred_scores'].cpu().numpy() + pred_boxes = box_dict['pred_boxes'].cpu().numpy() + pred_labels = box_dict['pred_labels'].cpu().numpy() + pred_dict = get_template_prediction(pred_scores.shape[0]) + if pred_scores.shape[0] == 0: + return pred_dict + + pred_boxes_img = pred_boxes + pred_boxes_camera = pred_boxes + + pred_dict['name'] = np.array(class_names)[pred_labels - 1] + pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6] + pred_dict['bbox'] = pred_boxes_img + pred_dict['dimensions'] = pred_boxes_camera[:, 3:6] + pred_dict['location'] = pred_boxes_camera[:, 0:3] + pred_dict['rotation_y'] = pred_boxes_camera[:, 6] + pred_dict['score'] = pred_scores + pred_dict['boxes_lidar'] = pred_boxes + + return pred_dict + + annos = [] + for index, box_dict in enumerate(pred_dicts): + frame_id = batch_dict['frame_id'][index] + + single_pred_dict = generate_single_sample_dict(index, box_dict) + single_pred_dict['frame_id'] = frame_id + annos.append(single_pred_dict) + + if output_path is not None: + cur_det_file = output_path / ('%s.txt' % frame_id) + with open(cur_det_file, 'w') as f: + bbox = single_pred_dict['bbox'] + loc = single_pred_dict['location'] + dims = single_pred_dict['dimensions'] # lhw -> hwl + + for idx in range(len(bbox)): + print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' + % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx], + bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3], + dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0], + loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx], + single_pred_dict['score'][idx]), file=f) + + return annos + + def __len__(self): + if self._merge_all_iters_to_one_epoch: + return len(self.argo2_infos) * self.total_epochs + + return len(self.argo2_infos) + + def __getitem__(self, index): + # index = 4 + if self._merge_all_iters_to_one_epoch: + index = index % len(self.argo2_infos) + + info = copy.deepcopy(self.argo2_infos[index]) + + sample_idx = info['point_cloud']['velodyne_path'].split('/')[-1].rstrip('.bin') + calib = None + get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points']) + + input_dict = { + 'frame_id': sample_idx, + 'calib': calib, + } + + if 'annos' in info: + annos = info['annos'] + loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y'] + gt_names = annos['name'] + gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32) + + 
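# gt_boxes layout is (N, 7): [x, y, z, dx, dy, dz, heading], the box
# convention used throughout pcdet (see the augmentor docstrings below).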
input_dict.update({ + 'gt_names': gt_names, + 'gt_boxes': gt_bboxes_3d + }) + + if "points" in get_item_list: + points = self.get_lidar(sample_idx) + input_dict['points'] = points + + input_dict['calib'] = calib + data_dict = self.prepare_data(data_dict=input_dict) + + return data_dict + + def format_results(self, + outputs, + class_names, + pklfile_prefix=None, + submission_prefix=None, + ): + """Format the results to .feather file with argo2 format. + + Args: + outputs (list[dict]): Testing results of the dataset. + pklfile_prefix (str | None): The prefix of pkl files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + submission_prefix (str | None): The prefix of submitted files. It + includes the file path and the prefix of filename, e.g., + "a/b/prefix". If not specified, a temp file will be created. + Default: None. + + Returns: + tuple: (result_files, tmp_dir), result_files is a dict containing + the json filepaths, tmp_dir is the temporal directory created + for saving json files when jsonfile_prefix is not specified. + """ + import pandas as pd + + assert len(self.argo2_infos) == len(outputs) + num_samples = len(outputs) + print('\nGot {} samples'.format(num_samples)) + + serialized_dts_list = [] + + print('\nConvert predictions to Argoverse 2 format') + for i in range(num_samples): + out_i = outputs[i] + log_id, ts = self.argo2_infos[i]['uuid'].split('/') + track_uuid = None + #cat_id = out_i['labels_3d'].numpy().tolist() + #category = [class_names[i].upper() for i in cat_id] + category = [class_name.upper() for class_name in out_i['name']] + serialized_dts = pd.DataFrame( + self.lidar_box_to_argo2(out_i['bbox']).numpy(), columns=list(LABEL_ATTR) + ) + serialized_dts["score"] = out_i['score'] + serialized_dts["log_id"] = log_id + serialized_dts["timestamp_ns"] = int(ts) + serialized_dts["category"] = category + serialized_dts_list.append(serialized_dts) + + dts = ( + pd.concat(serialized_dts_list) + .set_index(["log_id", "timestamp_ns"]) + .sort_index() + ) + + dts = dts.sort_values("score", ascending=False).reset_index() + + if pklfile_prefix is not None: + if not pklfile_prefix.endswith(('.feather')): + pklfile_prefix = f'{pklfile_prefix}.feather' + dts.to_feather(pklfile_prefix) + print(f'Result is saved to {pklfile_prefix}.') + + dts = dts.set_index(["log_id", "timestamp_ns"]).sort_index() + + return dts + + def lidar_box_to_argo2(self, boxes): + boxes = torch.Tensor(boxes) + cnt_xyz = boxes[:, :3] + lwh = boxes[:, [3, 4, 5]] + yaw = boxes[:, 6] + + quat = yaw_to_quat(yaw) + argo_cuboid = torch.cat([cnt_xyz, lwh, quat], dim=1) + return argo_cuboid + + def evaluation(self, + results, + class_names, + eval_metric='waymo', + logger=None, + pklfile_prefix=None, + submission_prefix=None, + show=False, + output_path=None, + pipeline=None): + """Evaluation in Argo2 protocol. + + Args: + results (list[dict]): Testing results of the dataset. + metric (str | list[str]): Metrics to be evaluated. + Default: 'waymo'. Another supported metric is 'Argo2'. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + pklfile_prefix (str | None): The prefix of pkl files. It includes + the file path and the prefix of filename, e.g., "a/b/prefix". + If not specified, a temp file will be created. Default: None. + submission_prefix (str | None): The prefix of submission datas. + If not specified, the submission data will not be generated. 
+ show (bool): Whether to visualize. + Default: False. + out_dir (str): Path to save the visualization results. + Default: None. + pipeline (list[dict], optional): raw data loading for showing. + Default: None. + + Returns: + dict[str: float]: results of each evaluation metric + """ + from av2.evaluation.detection.constants import CompetitionCategories + from av2.evaluation.detection.utils import DetectionCfg + from av2.evaluation.detection.eval import evaluate + from av2.utils.io import read_feather + + dts = self.format_results(results, class_names, pklfile_prefix, submission_prefix) + argo2_root = self.root_path + val_anno_path = osp.join(argo2_root, 'val_anno.feather') + gts = read_feather(Path(val_anno_path)) + gts = gts.set_index(["log_id", "timestamp_ns"]).sort_values("category") + + valid_uuids_gts = gts.index.tolist() + valid_uuids_dts = dts.index.tolist() + valid_uuids = set(valid_uuids_gts) & set(valid_uuids_dts) + gts = gts.loc[list(valid_uuids)].sort_index() + + categories = set(x.value for x in CompetitionCategories) + categories &= set(gts["category"].unique().tolist()) + + dataset_dir = Path(argo2_root) / 'sensor' / 'val' + cfg = DetectionCfg( + dataset_dir=dataset_dir, + categories=tuple(sorted(categories)), + max_range_m=self.evaluate_range, + eval_only_roi_instances=True, + ) + + # Evaluate using Argoverse detection API. + eval_dts, eval_gts, metrics = evaluate( + dts.reset_index(), gts.reset_index(), cfg + ) + + valid_categories = sorted(categories) + ["AVERAGE_METRICS"] + ap_dict = {} + for index, row in metrics.iterrows(): + ap_dict[index] = row.to_json() + return metrics.loc[valid_categories], ap_dict + +def parse_config(): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--root_path', type=str, default="/data/argo2/sensor") + parser.add_argument('--output_dir', type=str, default="/data/argo2/processed") + args = parser.parse_args() + return args + + +if __name__ == '__main__': + args = parse_config() + root = args.root_path + output_dir = args.output_dir + save_bin = True + ts2idx, seg_path_list, seg_split_list = prepare(root) + + velodyne_dir = Path(output_dir) / 'training' / 'velodyne' + if not velodyne_dir.exists(): + velodyne_dir.mkdir(parents=True, exist_ok=True) + + info_list = [] + create_argo2_infos(seg_path_list, seg_split_list, info_list, ts2idx, output_dir, save_bin, 0, 1) + + assert len(info_list) > 0 + + train_info = [e for e in info_list if e['sample_idx'][0] == '0'] + val_info = [e for e in info_list if e['sample_idx'][0] == '1'] + test_info = [e for e in info_list if e['sample_idx'][0] == '2'] + trainval_info = train_info + val_info + assert len(train_info) + len(val_info) + len(test_info) == len(info_list) + + # save info_list in under the output_dir as pickle file + with open(osp.join(output_dir, 'argo2_infos_train.pkl'), 'wb') as f: + pkl.dump(train_info, f) + + with open(osp.join(output_dir, 'argo2_infos_val.pkl'), 'wb') as f: + pkl.dump(val_info, f) + + # save validation anno feather + save_feather_path = os.path.join(output_dir, 'val_anno.feather') + val_seg_path_list = [seg_path for seg_path in seg_path_list if 'val' in seg_path] + assert len(val_seg_path_list) == len([i for i in seg_split_list if i == 'val']) + + seg_anno_list = [] + for seg_path in val_seg_path_list: + seg_anno = read_feather(osp.join(seg_path, 'annotations.feather')) + log_id = seg_path.split('/')[-1] + seg_anno["log_id"] = log_id + seg_anno_list.append(seg_anno) + + gts = pd.concat(seg_anno_list).reset_index() + 
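# val_anno.feather is the ground-truth file that Argo2Dataset.evaluation()
# reads back via val_anno_path, so it must cover every val segment.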
gts.to_feather(save_feather_path) diff --git a/toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/constants.py b/toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/constants.py new file mode 100644 index 000000000..e53a0c9d4 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/constants.py @@ -0,0 +1,12 @@ +LABEL_ATTR = ( + "tx_m", + "ty_m", + "tz_m", + "length_m", + "width_m", + "height_m", + "qw", + "qx", + "qy", + "qz", +) \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/so3.py b/toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/so3.py new file mode 100644 index 000000000..f5ec5e095 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/argo2/argo2_utils/so3.py @@ -0,0 +1,141 @@ +"""SO(3) group transformations.""" + +import kornia.geometry.conversions as C +import torch +from torch import Tensor +from math import pi as PI + + +@torch.jit.script +def quat_to_mat(quat_wxyz: Tensor) -> Tensor: + """Convert scalar first quaternion to rotation matrix. + + Args: + quat_wxyz: (...,4) Scalar first quaternions. + + Returns: + (...,3,3) 3D rotation matrices. + """ + return C.quaternion_to_rotation_matrix( + quat_wxyz, order=C.QuaternionCoeffOrder.WXYZ + ) + + +# @torch.jit.script +def mat_to_quat(mat: Tensor) -> Tensor: + """Convert rotation matrix to scalar first quaternion. + + Args: + mat: (...,3,3) 3D rotation matrices. + + Returns: + (...,4) Scalar first quaternions. + """ + return C.rotation_matrix_to_quaternion( + mat, order=C.QuaternionCoeffOrder.WXYZ + ) + + +@torch.jit.script +def quat_to_xyz( + quat_wxyz: Tensor, singularity_value: float = PI / 2 +) -> Tensor: + """Convert scalar first quaternion to Tait-Bryan angles. + + Reference: + https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Source_code_2 + + Args: + quat_wxyz: (...,4) Scalar first quaternions. + singularity_value: Value that's set at the singularities. + + Returns: + (...,3) The Tait-Bryan angles --- roll, pitch, and yaw. + """ + qw = quat_wxyz[..., 0] + qx = quat_wxyz[..., 1] + qy = quat_wxyz[..., 2] + qz = quat_wxyz[..., 3] + + # roll (x-axis rotation) + sinr_cosp = 2 * (qw * qx + qy * qz) + cosr_cosp = 1 - 2 * (qx * qx + qy * qy) + roll = torch.atan2(sinr_cosp, cosr_cosp) + + # pitch (y-axis rotation) + pitch = 2 * (qw * qy - qz * qx) + is_out_of_range = torch.abs(pitch) >= 1 + pitch[is_out_of_range] = torch.copysign( + torch.as_tensor(singularity_value), pitch[is_out_of_range] + ) + pitch[~is_out_of_range] = torch.asin(pitch[~is_out_of_range]) + + # yaw (z-axis rotation) + siny_cosp = 2 * (qw * qz + qx * qy) + cosy_cosp = 1 - 2 * (qy * qy + qz * qz) + yaw = torch.atan2(siny_cosp, cosy_cosp) + xyz = torch.stack([roll, pitch, yaw], dim=-1) + return xyz + + +@torch.jit.script +def quat_to_yaw(quat_wxyz: Tensor) -> Tensor: + """Convert scalar first quaternion to yaw (rotation about vertical axis). + + Reference: + https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Source_code_2 + + Args: + quat_wxyz: (...,4) Scalar first quaternions. + + Returns: + (...,) The rotation about the z-axis in radians. + """ + xyz = quat_to_xyz(quat_wxyz) + yaw_rad: Tensor = xyz[..., -1] + return yaw_rad + + +@torch.jit.script +def xyz_to_quat(xyz_rad: Tensor) -> Tensor: + """Convert euler angles (xyz - pitch, roll, yaw) to scalar first quaternions. + + Args: + xyz_rad: (...,3) Tensor of roll, pitch, and yaw in radians. + + Returns: + (...,4) Scalar first quaternions (wxyz). 
+ """ + x_rad = xyz_rad[..., 0] + y_rad = xyz_rad[..., 1] + z_rad = xyz_rad[..., 2] + + cy = torch.cos(z_rad * 0.5) + sy = torch.sin(z_rad * 0.5) + cp = torch.cos(y_rad * 0.5) + sp = torch.sin(y_rad * 0.5) + cr = torch.cos(x_rad * 0.5) + sr = torch.sin(x_rad * 0.5) + + qw = cr * cp * cy + sr * sp * sy + qx = sr * cp * cy - cr * sp * sy + qy = cr * sp * cy + sr * cp * sy + qz = cr * cp * sy - sr * sp * cy + quat_wxyz = torch.stack([qw, qx, qy, qz], dim=-1) + return quat_wxyz + + +@torch.jit.script +def yaw_to_quat(yaw_rad: Tensor) -> Tensor: + """Convert yaw (rotation about the vertical axis) to scalar first quaternions. + + Args: + yaw_rad: (...,1) Rotations about the z-axis. + + Returns: + (...,4) scalar first quaternions (wxyz). + """ + xyz_rad = torch.zeros_like(yaw_rad)[..., None].repeat_interleave(3, dim=-1) + xyz_rad[..., -1] = yaw_rad + quat_wxyz: Tensor = xyz_to_quat(xyz_rad) + return quat_wxyz diff --git a/toolbox/openpcdet/pcdet/datasets/augmentor/__init__.py b/toolbox/openpcdet/pcdet/datasets/augmentor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/augmentor/augmentor_utils.py b/toolbox/openpcdet/pcdet/datasets/augmentor/augmentor_utils.py new file mode 100644 index 000000000..3c088e33c --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/augmentor/augmentor_utils.py @@ -0,0 +1,658 @@ +import numpy as np +import math +import copy +from ...utils import common_utils +from ...utils import box_utils + + +def random_flip_along_x(gt_boxes, points, return_flip=False, enable=None): + """ + Args: + gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C) + Returns: + """ + if enable is None: + enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5]) + if enable: + gt_boxes[:, 1] = -gt_boxes[:, 1] + gt_boxes[:, 6] = -gt_boxes[:, 6] + points[:, 1] = -points[:, 1] + + if gt_boxes.shape[1] > 7: + gt_boxes[:, 8] = -gt_boxes[:, 8] + if return_flip: + return gt_boxes, points, enable + return gt_boxes, points + + +def random_flip_along_y(gt_boxes, points, return_flip=False, enable=None): + """ + Args: + gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C) + Returns: + """ + if enable is None: + enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5]) + if enable: + gt_boxes[:, 0] = -gt_boxes[:, 0] + gt_boxes[:, 6] = -(gt_boxes[:, 6] + np.pi) + points[:, 0] = -points[:, 0] + + if gt_boxes.shape[1] > 7: + gt_boxes[:, 7] = -gt_boxes[:, 7] + if return_flip: + return gt_boxes, points, enable + return gt_boxes, points + + +def global_rotation(gt_boxes, points, rot_range, return_rot=False, noise_rotation=None): + """ + Args: + gt_boxes: (N, 7 + C), [x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C), + rot_range: [min, max] + Returns: + """ + if noise_rotation is None: + noise_rotation = np.random.uniform(rot_range[0], rot_range[1]) + points = common_utils.rotate_points_along_z(points[np.newaxis, :, :], np.array([noise_rotation]))[0] + gt_boxes[:, 0:3] = common_utils.rotate_points_along_z(gt_boxes[np.newaxis, :, 0:3], np.array([noise_rotation]))[0] + gt_boxes[:, 6] += noise_rotation + if gt_boxes.shape[1] > 7: + gt_boxes[:, 7:9] = common_utils.rotate_points_along_z( + np.hstack((gt_boxes[:, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :], + np.array([noise_rotation]) + )[0][:, 0:2] + + if return_rot: + return gt_boxes, points, noise_rotation + return gt_boxes, points + + +def global_scaling(gt_boxes, points, scale_range, 
return_scale=False): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading] + points: (M, 3 + C), + scale_range: [min, max] + Returns: + """ + if scale_range[1] - scale_range[0] < 1e-3: + return gt_boxes, points + noise_scale = np.random.uniform(scale_range[0], scale_range[1]) + points[:, :3] *= noise_scale + gt_boxes[:, :6] *= noise_scale + if gt_boxes.shape[1] > 7: + gt_boxes[:, 7:] *= noise_scale + + if return_scale: + return gt_boxes, points, noise_scale + return gt_boxes, points + +def global_scaling_with_roi_boxes(gt_boxes, roi_boxes, points, scale_range, return_scale=False): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading] + points: (M, 3 + C), + scale_range: [min, max] + Returns: + """ + if scale_range[1] - scale_range[0] < 1e-3: + return gt_boxes, points + noise_scale = np.random.uniform(scale_range[0], scale_range[1]) + points[:, :3] *= noise_scale + gt_boxes[:, :6] *= noise_scale + roi_boxes[:,:, [0,1,2,3,4,5,7,8]] *= noise_scale + if return_scale: + return gt_boxes,roi_boxes, points, noise_scale + return gt_boxes, roi_boxes, points + + +def random_image_flip_horizontal(image, depth_map, gt_boxes, calib): + """ + Performs random horizontal flip augmentation + Args: + image: (H_image, W_image, 3), Image + depth_map: (H_depth, W_depth), Depth map + gt_boxes: (N, 7), 3D box labels in LiDAR coordinates [x, y, z, w, l, h, ry] + calib: calibration.Calibration, Calibration object + Returns: + aug_image: (H_image, W_image, 3), Augmented image + aug_depth_map: (H_depth, W_depth), Augmented depth map + aug_gt_boxes: (N, 7), Augmented 3D box labels in LiDAR coordinates [x, y, z, w, l, h, ry] + """ + # Randomly augment with 50% chance + enable = np.random.choice([False, True], replace=False, p=[0.5, 0.5]) + + if enable: + # Flip images + aug_image = np.fliplr(image) + aug_depth_map = np.fliplr(depth_map) + + # Flip 3D gt_boxes by flipping the centroids in image space + aug_gt_boxes = copy.copy(gt_boxes) + locations = aug_gt_boxes[:, :3] + img_pts, img_depth = calib.lidar_to_img(locations) + W = image.shape[1] + img_pts[:, 0] = W - img_pts[:, 0] + pts_rect = calib.img_to_rect(u=img_pts[:, 0], v=img_pts[:, 1], depth_rect=img_depth) + pts_lidar = calib.rect_to_lidar(pts_rect) + aug_gt_boxes[:, :3] = pts_lidar + aug_gt_boxes[:, 6] = -1 * aug_gt_boxes[:, 6] + + else: + aug_image = image + aug_depth_map = depth_map + aug_gt_boxes = gt_boxes + + return aug_image, aug_depth_map, aug_gt_boxes + + +def random_local_translation_along_x(gt_boxes, points, offset_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C), + offset_range: [min max]] + Returns: + """ + # augs = {} + for idx, box in enumerate(gt_boxes): + offset = np.random.uniform(offset_range[0], offset_range[1]) + # augs[f'object_{idx}'] = offset + points_in_box, mask = get_points_in_box(points, box) + points[mask, 0] += offset + + gt_boxes[idx, 0] += offset + + # if gt_boxes.shape[1] > 7: + # gt_boxes[idx, 7] += offset + + return gt_boxes, points + + +def random_local_translation_along_y(gt_boxes, points, offset_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C), + offset_range: [min max]] + Returns: + """ + # augs = {} + for idx, box in enumerate(gt_boxes): + offset = np.random.uniform(offset_range[0], offset_range[1]) + # augs[f'object_{idx}'] = offset + points_in_box, mask = get_points_in_box(points, box) + points[mask, 1] += offset + + gt_boxes[idx, 1] += offset + + # if gt_boxes.shape[1] > 8: + # 
gt_boxes[idx, 8] += offset + + return gt_boxes, points + + +def random_local_translation_along_z(gt_boxes, points, offset_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C), + offset_range: [min max]] + Returns: + """ + # augs = {} + for idx, box in enumerate(gt_boxes): + offset = np.random.uniform(offset_range[0], offset_range[1]) + # augs[f'object_{idx}'] = offset + points_in_box, mask = get_points_in_box(points, box) + points[mask, 2] += offset + + gt_boxes[idx, 2] += offset + + return gt_boxes, points + + +def global_frustum_dropout_top(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + # threshold = max - length * uniform(0 ~ 0.2) + threshold = np.max(points[:, 2]) - intensity * (np.max(points[:, 2]) - np.min(points[:, 2])) + + points = points[points[:, 2] < threshold] + gt_boxes = gt_boxes[gt_boxes[:, 2] < threshold] + return gt_boxes, points + + +def global_frustum_dropout_bottom(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + + threshold = np.min(points[:, 2]) + intensity * (np.max(points[:, 2]) - np.min(points[:, 2])) + points = points[points[:, 2] > threshold] + gt_boxes = gt_boxes[gt_boxes[:, 2] > threshold] + + return gt_boxes, points + + +def global_frustum_dropout_left(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + + threshold = np.max(points[:, 1]) - intensity * (np.max(points[:, 1]) - np.min(points[:, 1])) + points = points[points[:, 1] < threshold] + gt_boxes = gt_boxes[gt_boxes[:, 1] < threshold] + + return gt_boxes, points + + +def global_frustum_dropout_right(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + + threshold = np.min(points[:, 1]) + intensity * (np.max(points[:, 1]) - np.min(points[:, 1])) + points = points[points[:, 1] > threshold] + gt_boxes = gt_boxes[gt_boxes[:, 1] > threshold] + + return gt_boxes, points + + +def local_scaling(gt_boxes, points, scale_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading] + points: (M, 3 + C), + scale_range: [min, max] + Returns: + """ + if scale_range[1] - scale_range[0] < 1e-3: + return gt_boxes, points + + # augs = {} + for idx, box in enumerate(gt_boxes): + noise_scale = np.random.uniform(scale_range[0], scale_range[1]) + # augs[f'object_{idx}'] = noise_scale + points_in_box, mask = get_points_in_box(points, box) + + # tranlation to axis center + points[mask, 0] -= box[0] + points[mask, 1] -= box[1] + points[mask, 2] -= box[2] + + # apply scaling + points[mask, :3] *= noise_scale + + # tranlation back to original position + points[mask, 0] += box[0] + points[mask, 1] += box[1] + points[mask, 2] += box[2] + + gt_boxes[idx, 3:6] *= noise_scale + return gt_boxes, points + + +def local_rotation(gt_boxes, points, rot_range): + """ + Args: + gt_boxes: (N, 7), 
[x, y, z, dx, dy, dz, heading, [vx], [vy]] + points: (M, 3 + C), + rot_range: [min, max] + Returns: + """ + # augs = {} + for idx, box in enumerate(gt_boxes): + noise_rotation = np.random.uniform(rot_range[0], rot_range[1]) + # augs[f'object_{idx}'] = noise_rotation + points_in_box, mask = get_points_in_box(points, box) + + centroid_x = box[0] + centroid_y = box[1] + centroid_z = box[2] + + # tranlation to axis center + points[mask, 0] -= centroid_x + points[mask, 1] -= centroid_y + points[mask, 2] -= centroid_z + box[0] -= centroid_x + box[1] -= centroid_y + box[2] -= centroid_z + + # apply rotation + points[mask, :] = common_utils.rotate_points_along_z(points[np.newaxis, mask, :], np.array([noise_rotation]))[0] + box[0:3] = common_utils.rotate_points_along_z(box[np.newaxis, np.newaxis, 0:3], np.array([noise_rotation]))[0][0] + + # tranlation back to original position + points[mask, 0] += centroid_x + points[mask, 1] += centroid_y + points[mask, 2] += centroid_z + box[0] += centroid_x + box[1] += centroid_y + box[2] += centroid_z + + gt_boxes[idx, 6] += noise_rotation + if gt_boxes.shape[1] > 8: + gt_boxes[idx, 7:9] = common_utils.rotate_points_along_z( + np.hstack((gt_boxes[idx, 7:9], np.zeros((gt_boxes.shape[0], 1))))[np.newaxis, :, :], + np.array([noise_rotation]) + )[0][:, 0:2] + + return gt_boxes, points + + +def local_frustum_dropout_top(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + for idx, box in enumerate(gt_boxes): + x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5] + + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + points_in_box, mask = get_points_in_box(points, box) + threshold = (z + dz / 2) - intensity * dz + + points = points[np.logical_not(np.logical_and(mask, points[:, 2] >= threshold))] + + return gt_boxes, points + + +def local_frustum_dropout_bottom(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + for idx, box in enumerate(gt_boxes): + x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5] + + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + points_in_box, mask = get_points_in_box(points, box) + threshold = (z - dz / 2) + intensity * dz + + points = points[np.logical_not(np.logical_and(mask, points[:, 2] <= threshold))] + + return gt_boxes, points + + +def local_frustum_dropout_left(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + for idx, box in enumerate(gt_boxes): + x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5] + + intensity = np.random.uniform(intensity_range[0], intensity_range[1]) + points_in_box, mask = get_points_in_box(points, box) + threshold = (y + dy / 2) - intensity * dy + + points = points[np.logical_not(np.logical_and(mask, points[:, 1] >= threshold))] + + return gt_boxes, points + + +def local_frustum_dropout_right(gt_boxes, points, intensity_range): + """ + Args: + gt_boxes: (N, 7), [x, y, z, dx, dy, dz, heading, [vx], [vy]], + points: (M, 3 + C), + intensity: [min, max] + Returns: + """ + for idx, box in enumerate(gt_boxes): + x, y, z, dx, dy, dz = box[0], box[1], box[2], box[3], box[4], box[5] + + intensity = np.random.uniform(intensity_range[0], 
intensity_range[1])
+        points_in_box, mask = get_points_in_box(points, box)
+        threshold = (y - dy / 2) + intensity * dy
+
+        points = points[np.logical_not(np.logical_and(mask, points[:, 1] <= threshold))]
+
+    return gt_boxes, points
+
+
+def get_points_in_box(points, gt_box):
+    x, y, z = points[:, 0], points[:, 1], points[:, 2]
+    cx, cy, cz = gt_box[0], gt_box[1], gt_box[2]
+    dx, dy, dz, rz = gt_box[3], gt_box[4], gt_box[5], gt_box[6]
+    shift_x, shift_y, shift_z = x - cx, y - cy, z - cz
+
+    MARGIN = 1e-1
+    cosa, sina = math.cos(-rz), math.sin(-rz)
+    local_x = shift_x * cosa + shift_y * (-sina)
+    local_y = shift_x * sina + shift_y * cosa
+
+    mask = np.logical_and(abs(shift_z) <= dz / 2.0,
+                          np.logical_and(abs(local_x) <= dx / 2.0 + MARGIN,
+                                         abs(local_y) <= dy / 2.0 + MARGIN))
+
+    points = points[mask]
+
+    return points, mask
+
+
+def get_pyramids(boxes):
+    pyramid_orders = np.array([
+        [0, 1, 5, 4],
+        [4, 5, 6, 7],
+        [7, 6, 2, 3],
+        [3, 2, 1, 0],
+        [1, 2, 6, 5],
+        [0, 4, 7, 3]
+    ])
+    boxes_corners = box_utils.boxes_to_corners_3d(boxes).reshape(-1, 24)
+
+    pyramid_list = []
+    for order in pyramid_orders:
+        # frustum polygon: 5 corners, 5 surfaces
+        pyramid = np.concatenate((
+            boxes[:, 0:3],
+            boxes_corners[:, 3 * order[0]: 3 * order[0] + 3],
+            boxes_corners[:, 3 * order[1]: 3 * order[1] + 3],
+            boxes_corners[:, 3 * order[2]: 3 * order[2] + 3],
+            boxes_corners[:, 3 * order[3]: 3 * order[3] + 3]), axis=1)
+        pyramid_list.append(pyramid[:, None, :])
+    pyramids = np.concatenate(pyramid_list, axis=1)  # [N, 6, 15], 15=5*3
+    return pyramids
+
+
+def one_hot(x, num_class=1):
+    if num_class is None:
+        num_class = 1
+    ohx = np.zeros((len(x), num_class))
+    ohx[range(len(x)), x] = 1
+    return ohx
+
+
+def points_in_pyramids_mask(points, pyramids):
+    pyramids = pyramids.reshape(-1, 5, 3)
+    flags = np.zeros((points.shape[0], pyramids.shape[0]), dtype=bool)
+    for i, pyramid in enumerate(pyramids):
+        flags[:, i] = np.logical_or(flags[:, i], box_utils.in_hull(points[:, 0:3], pyramid))
+    return flags
+
+
+def local_pyramid_dropout(gt_boxes, points, dropout_prob, pyramids=None):
+    if pyramids is None:
+        pyramids = get_pyramids(gt_boxes).reshape([-1, 6, 5, 3])  # the six surface pyramids of each box: [num_boxes, 6, 5, 3]
+    drop_pyramid_indices = np.random.randint(0, 6, (pyramids.shape[0]))
+    drop_pyramid_one_hot = one_hot(drop_pyramid_indices, num_class=6)
+    drop_box_mask = np.random.uniform(0, 1, (pyramids.shape[0])) <= dropout_prob
+    if np.sum(drop_box_mask) != 0:
+        drop_pyramid_mask = (np.tile(drop_box_mask[:, None], [1, 6]) * drop_pyramid_one_hot) > 0
+        drop_pyramids = pyramids[drop_pyramid_mask]
+        point_masks = points_in_pyramids_mask(points, drop_pyramids)
+        points = points[np.logical_not(point_masks.any(-1))]
+    # print(drop_box_mask)
+    pyramids = pyramids[np.logical_not(drop_box_mask)]
+    return gt_boxes, points, pyramids
+
+
+def local_pyramid_sparsify(gt_boxes, points, prob, max_num_pts, pyramids=None):
+    if pyramids is None:
+        pyramids = get_pyramids(gt_boxes).reshape([-1, 6, 5, 3])  # the six surface pyramids of each box: [num_boxes, 6, 5, 3]
+    if pyramids.shape[0] > 0:
+        sparsity_prob, sparsity_num = prob, max_num_pts
+        sparsify_pyramid_indices = np.random.randint(0, 6, (pyramids.shape[0]))
+        sparsify_pyramid_one_hot = one_hot(sparsify_pyramid_indices, num_class=6)
+        sparsify_box_mask = np.random.uniform(0, 1, (pyramids.shape[0])) <= sparsity_prob
+        sparsify_pyramid_mask = (np.tile(sparsify_box_mask[:, None], [1, 6]) * sparsify_pyramid_one_hot) > 0
+        # print(sparsify_box_mask)
+
+        pyramid_sampled = pyramids[sparsify_pyramid_mask]  # (-1, 6, 5, 3)[(num_sample, 6)]
+        # print(pyramid_sampled.shape)
+        pyramid_sampled_point_masks = points_in_pyramids_mask(points, pyramid_sampled)
+        pyramid_sampled_points_num = pyramid_sampled_point_masks.sum(0)  # the number of points in each surface pyramid
+        valid_pyramid_sampled_mask = pyramid_sampled_points_num > sparsity_num  # only pyramids with more than sparsity_num points need sparsifying
+
+        sparsify_pyramids = pyramid_sampled[valid_pyramid_sampled_mask]
+        if sparsify_pyramids.shape[0] > 0:
+            point_masks = pyramid_sampled_point_masks[:, valid_pyramid_sampled_mask]
+            remain_points = points[
+                np.logical_not(point_masks.any(-1))]  # points outside the pyramids being sparsified
+            to_sparsify_points = [points[point_masks[:, i]] for i in range(point_masks.shape[1])]
+
+            sparsified_points = []
+            for sample in to_sparsify_points:
+                sampled_indices = np.random.choice(sample.shape[0], size=sparsity_num, replace=False)
+                sparsified_points.append(sample[sampled_indices])
+            sparsified_points = np.concatenate(sparsified_points, axis=0)
+            points = np.concatenate([remain_points, sparsified_points], axis=0)
+        pyramids = pyramids[np.logical_not(sparsify_box_mask)]
+    return gt_boxes, points, pyramids
+
+
+def local_pyramid_swap(gt_boxes, points, prob, max_num_pts, pyramids=None):
+    def get_points_ratio(points, pyramid):
+        surface_center = (pyramid[3:6] + pyramid[6:9] + pyramid[9:12] + pyramid[12:]) / 4.0
+        vector_0, vector_1, vector_2 = pyramid[6:9] - pyramid[3:6], pyramid[12:] - pyramid[3:6], pyramid[0:3] - surface_center
+        alphas = ((points[:, 0:3] - pyramid[3:6]) * vector_0).sum(-1) / np.power(vector_0, 2).sum()
+        betas = ((points[:, 0:3] - pyramid[3:6]) * vector_1).sum(-1) / np.power(vector_1, 2).sum()
+        gammas = ((points[:, 0:3] - surface_center) * vector_2).sum(-1) / np.power(vector_2, 2).sum()
+        return [alphas, betas, gammas]
+
+    def recover_points_by_ratio(points_ratio, pyramid):
+        alphas, betas, gammas = points_ratio
+        surface_center = (pyramid[3:6] + pyramid[6:9] + pyramid[9:12] + pyramid[12:]) / 4.0
+        vector_0, vector_1, vector_2 = pyramid[6:9] - pyramid[3:6], pyramid[12:] - pyramid[3:6], pyramid[0:3] - surface_center
+        points = (alphas[:, None] * vector_0 + betas[:, None] * vector_1) + pyramid[3:6] + gammas[:, None] * vector_2
+        return points
+
+    def recover_points_intensity_by_ratio(points_intensity_ratio, max_intensity, min_intensity):
+        return points_intensity_ratio * (max_intensity - min_intensity) + min_intensity
+
+    # swap partition
+    if pyramids is None:
+        pyramids = get_pyramids(gt_boxes).reshape([-1, 6, 5, 3])  # the six surface pyramids of each box: [num_boxes, 6, 5, 3]
+    swap_prob, num_thres = prob, max_num_pts
+    swap_pyramid_mask = np.random.uniform(0, 1, (pyramids.shape[0])) <= swap_prob
+
+    if swap_pyramid_mask.sum() > 0:
+        point_masks = points_in_pyramids_mask(points, pyramids)
+        point_nums = point_masks.sum(0).reshape(pyramids.shape[0], -1)  # [N, 6]
+        non_zero_pyramids_mask = point_nums > num_thres  # ignore dropped or highly occluded pyramids
+        selected_pyramids = non_zero_pyramids_mask * swap_pyramid_mask[:, None]  # selected boxes and all their valid pyramids
+        # print(selected_pyramids)
+        if selected_pyramids.sum() > 0:
+            # get to_swap pyramids
+            index_i, index_j = np.nonzero(selected_pyramids)
+            selected_pyramid_indices = [np.random.choice(index_j[index_i == i]) \
+                if e and (index_i == i).any() else 0 for i, e in
+                enumerate(swap_pyramid_mask)]
+            selected_pyramids_mask = selected_pyramids * one_hot(selected_pyramid_indices, num_class=6) == 1
+
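# selected_pyramids_mask flags at most one pyramid per box chosen for swapping:
# the per-box one-hot choice intersected with the validity mask.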
to_swap_pyramids = pyramids[selected_pyramids_mask] + + # get swapped pyramids + index_i, index_j = np.nonzero(selected_pyramids_mask) + non_zero_pyramids_mask[selected_pyramids_mask] = False + swapped_index_i = np.array([np.random.choice(np.where(non_zero_pyramids_mask[:, j])[0]) if \ + np.where(non_zero_pyramids_mask[:, j])[0].shape[0] > 0 else + index_i[i] for i, j in enumerate(index_j.tolist())]) + swapped_indicies = np.concatenate([swapped_index_i[:, None], index_j[:, None]], axis=1) + swapped_pyramids = pyramids[ + swapped_indicies[:, 0].astype(np.int32), swapped_indicies[:, 1].astype(np.int32)] + + # concat to_swap&swapped pyramids + swap_pyramids = np.concatenate([to_swap_pyramids, swapped_pyramids], axis=0) + swap_point_masks = points_in_pyramids_mask(points, swap_pyramids) + remain_points = points[np.logical_not(swap_point_masks.any(-1))] + + # swap pyramids + points_res = [] + num_swapped_pyramids = swapped_pyramids.shape[0] + for i in range(num_swapped_pyramids): + to_swap_pyramid = to_swap_pyramids[i] + swapped_pyramid = swapped_pyramids[i] + + to_swap_points = points[swap_point_masks[:, i]] + swapped_points = points[swap_point_masks[:, i + num_swapped_pyramids]] + # for intensity transform + to_swap_points_intensity_ratio = (to_swap_points[:, -1:] - to_swap_points[:, -1:].min()) / \ + np.clip( + (to_swap_points[:, -1:].max() - to_swap_points[:, -1:].min()), + 1e-6, 1) + swapped_points_intensity_ratio = (swapped_points[:, -1:] - swapped_points[:, -1:].min()) / \ + np.clip( + (swapped_points[:, -1:].max() - swapped_points[:, -1:].min()), + 1e-6, 1) + + to_swap_points_ratio = get_points_ratio(to_swap_points, to_swap_pyramid.reshape(15)) + swapped_points_ratio = get_points_ratio(swapped_points, swapped_pyramid.reshape(15)) + new_to_swap_points = recover_points_by_ratio(swapped_points_ratio, to_swap_pyramid.reshape(15)) + new_swapped_points = recover_points_by_ratio(to_swap_points_ratio, swapped_pyramid.reshape(15)) + # for intensity transform + new_to_swap_points_intensity = recover_points_intensity_by_ratio( + swapped_points_intensity_ratio, to_swap_points[:, -1:].max(), + to_swap_points[:, -1:].min()) + new_swapped_points_intensity = recover_points_intensity_by_ratio( + to_swap_points_intensity_ratio, swapped_points[:, -1:].max(), + swapped_points[:, -1:].min()) + + # new_to_swap_points = np.concatenate([new_to_swap_points, swapped_points[:, -1:]], axis=1) + # new_swapped_points = np.concatenate([new_swapped_points, to_swap_points[:, -1:]], axis=1) + + new_to_swap_points = np.concatenate([new_to_swap_points, new_to_swap_points_intensity], axis=1) + new_swapped_points = np.concatenate([new_swapped_points, new_swapped_points_intensity], axis=1) + + points_res.append(new_to_swap_points) + points_res.append(new_swapped_points) + + points_res = np.concatenate(points_res, axis=0) + points = np.concatenate([remain_points, points_res], axis=0) + return gt_boxes, points diff --git a/toolbox/openpcdet/pcdet/datasets/augmentor/data_augmentor.py b/toolbox/openpcdet/pcdet/datasets/augmentor/data_augmentor.py new file mode 100644 index 000000000..56acebc81 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/augmentor/data_augmentor.py @@ -0,0 +1,319 @@ +from functools import partial + +import numpy as np +from PIL import Image + +from ...utils import common_utils +from . 
import augmentor_utils, database_sampler + + +class DataAugmentor(object): + def __init__(self, root_path, augmentor_configs, class_names, logger=None): + self.root_path = root_path + self.class_names = class_names + self.logger = logger + + self.data_augmentor_queue = [] + aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \ + else augmentor_configs.AUG_CONFIG_LIST + + for cur_cfg in aug_config_list: + if not isinstance(augmentor_configs, list): + if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST: + continue + cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg) + self.data_augmentor_queue.append(cur_augmentor) + + def disable_augmentation(self, augmentor_configs): + self.data_augmentor_queue = [] + aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \ + else augmentor_configs.AUG_CONFIG_LIST + + for cur_cfg in aug_config_list: + if not isinstance(augmentor_configs, list): + if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST: + continue + cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg) + self.data_augmentor_queue.append(cur_augmentor) + + def gt_sampling(self, config=None): + db_sampler = database_sampler.DataBaseSampler( + root_path=self.root_path, + sampler_cfg=config, + class_names=self.class_names, + logger=self.logger + ) + return db_sampler + + def __getstate__(self): + d = dict(self.__dict__) + del d['logger'] + return d + + def __setstate__(self, d): + self.__dict__.update(d) + + def random_world_flip(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.random_world_flip, config=config) + gt_boxes, points = data_dict['gt_boxes'], data_dict['points'] + for cur_axis in config['ALONG_AXIS_LIST']: + assert cur_axis in ['x', 'y'] + gt_boxes, points, enable = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)( + gt_boxes, points, return_flip=True + ) + data_dict['flip_%s'%cur_axis] = enable + if 'roi_boxes' in data_dict.keys(): + num_frame, num_rois,dim = data_dict['roi_boxes'].shape + roi_boxes, _, _ = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)( + data_dict['roi_boxes'].reshape(-1,dim), np.zeros([1,3]), return_flip=True, enable=enable + ) + data_dict['roi_boxes'] = roi_boxes.reshape(num_frame, num_rois,dim) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def random_world_rotation(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.random_world_rotation, config=config) + rot_range = config['WORLD_ROT_ANGLE'] + if not isinstance(rot_range, list): + rot_range = [-rot_range, rot_range] + gt_boxes, points, noise_rot = augmentor_utils.global_rotation( + data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rot=True + ) + if 'roi_boxes' in data_dict.keys(): + num_frame, num_rois,dim = data_dict['roi_boxes'].shape + roi_boxes, _, _ = augmentor_utils.global_rotation( + data_dict['roi_boxes'].reshape(-1, dim), np.zeros([1, 3]), rot_range=rot_range, return_rot=True, noise_rotation=noise_rot) + data_dict['roi_boxes'] = roi_boxes.reshape(num_frame, num_rois,dim) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + data_dict['noise_rot'] = noise_rot + return data_dict + + def random_world_scaling(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.random_world_scaling, config=config) + + if 'roi_boxes' in data_dict.keys(): + gt_boxes, roi_boxes, points, noise_scale = augmentor_utils.global_scaling_with_roi_boxes( + data_dict['gt_boxes'], 
data_dict['roi_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE'], return_scale=True + ) + data_dict['roi_boxes'] = roi_boxes + else: + gt_boxes, points, noise_scale = augmentor_utils.global_scaling( + data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE'], return_scale=True + ) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + data_dict['noise_scale'] = noise_scale + return data_dict + + def random_image_flip(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.random_image_flip, config=config) + images = data_dict["images"] + depth_maps = data_dict["depth_maps"] + gt_boxes = data_dict['gt_boxes'] + gt_boxes2d = data_dict["gt_boxes2d"] + calib = data_dict["calib"] + for cur_axis in config['ALONG_AXIS_LIST']: + assert cur_axis in ['horizontal'] + images, depth_maps, gt_boxes = getattr(augmentor_utils, 'random_image_flip_%s' % cur_axis)( + images, depth_maps, gt_boxes, calib, + ) + + data_dict['images'] = images + data_dict['depth_maps'] = depth_maps + data_dict['gt_boxes'] = gt_boxes + return data_dict + + def random_world_translation(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.random_world_translation, config=config) + noise_translate_std = config['NOISE_TRANSLATE_STD'] + assert len(noise_translate_std) == 3 + noise_translate = np.array([ + np.random.normal(0, noise_translate_std[0], 1), + np.random.normal(0, noise_translate_std[1], 1), + np.random.normal(0, noise_translate_std[2], 1), + ], dtype=np.float32).T + + gt_boxes, points = data_dict['gt_boxes'], data_dict['points'] + points[:, :3] += noise_translate + gt_boxes[:, :3] += noise_translate + + if 'roi_boxes' in data_dict.keys(): + data_dict['roi_boxes'][:, :3] += noise_translate + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + data_dict['noise_translate'] = noise_translate + return data_dict + + def random_local_translation(self, data_dict=None, config=None): + """ + Please check the correctness of it before using. + """ + if data_dict is None: + return partial(self.random_local_translation, config=config) + offset_range = config['LOCAL_TRANSLATION_RANGE'] + gt_boxes, points = data_dict['gt_boxes'], data_dict['points'] + for cur_axis in config['ALONG_AXIS_LIST']: + assert cur_axis in ['x', 'y', 'z'] + gt_boxes, points = getattr(augmentor_utils, 'random_local_translation_along_%s' % cur_axis)( + gt_boxes, points, offset_range, + ) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def random_local_rotation(self, data_dict=None, config=None): + """ + Please check the correctness of it before using. + """ + if data_dict is None: + return partial(self.random_local_rotation, config=config) + rot_range = config['LOCAL_ROT_ANGLE'] + if not isinstance(rot_range, list): + rot_range = [-rot_range, rot_range] + gt_boxes, points = augmentor_utils.local_rotation( + data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range + ) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def random_local_scaling(self, data_dict=None, config=None): + """ + Please check the correctness of it before using. 
+ """ + if data_dict is None: + return partial(self.random_local_scaling, config=config) + gt_boxes, points = augmentor_utils.local_scaling( + data_dict['gt_boxes'], data_dict['points'], config['LOCAL_SCALE_RANGE'] + ) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def random_world_frustum_dropout(self, data_dict=None, config=None): + """ + Please check the correctness of it before using. + """ + if data_dict is None: + return partial(self.random_world_frustum_dropout, config=config) + + intensity_range = config['INTENSITY_RANGE'] + gt_boxes, points = data_dict['gt_boxes'], data_dict['points'] + for direction in config['DIRECTION']: + assert direction in ['top', 'bottom', 'left', 'right'] + gt_boxes, points = getattr(augmentor_utils, 'global_frustum_dropout_%s' % direction)( + gt_boxes, points, intensity_range, + ) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def random_local_frustum_dropout(self, data_dict=None, config=None): + """ + Please check the correctness of it before using. + """ + if data_dict is None: + return partial(self.random_local_frustum_dropout, config=config) + + intensity_range = config['INTENSITY_RANGE'] + gt_boxes, points = data_dict['gt_boxes'], data_dict['points'] + for direction in config['DIRECTION']: + assert direction in ['top', 'bottom', 'left', 'right'] + gt_boxes, points = getattr(augmentor_utils, 'local_frustum_dropout_%s' % direction)( + gt_boxes, points, intensity_range, + ) + + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def random_local_pyramid_aug(self, data_dict=None, config=None): + """ + Refer to the paper: + SE-SSD: Self-Ensembling Single-Stage Object Detector From Point Cloud + """ + if data_dict is None: + return partial(self.random_local_pyramid_aug, config=config) + + gt_boxes, points = data_dict['gt_boxes'], data_dict['points'] + + gt_boxes, points, pyramids = augmentor_utils.local_pyramid_dropout(gt_boxes, points, config['DROP_PROB']) + gt_boxes, points, pyramids = augmentor_utils.local_pyramid_sparsify(gt_boxes, points, + config['SPARSIFY_PROB'], + config['SPARSIFY_MAX_NUM'], + pyramids) + gt_boxes, points = augmentor_utils.local_pyramid_swap(gt_boxes, points, + config['SWAP_PROB'], + config['SWAP_MAX_NUM'], + pyramids) + data_dict['gt_boxes'] = gt_boxes + data_dict['points'] = points + return data_dict + + def imgaug(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.imgaug, config=config) + imgs = data_dict["camera_imgs"] + img_process_infos = data_dict['img_process_infos'] + new_imgs = [] + for img, img_process_info in zip(imgs, img_process_infos): + flip = False + if config.RAND_FLIP and np.random.choice([0, 1]): + flip = True + rotate = np.random.uniform(*config.ROT_LIM) + # aug images + if flip: + img = img.transpose(method=Image.FLIP_LEFT_RIGHT) + img = img.rotate(rotate) + img_process_info[2] = flip + img_process_info[3] = rotate + new_imgs.append(img) + + data_dict["camera_imgs"] = new_imgs + return data_dict + + def forward(self, data_dict): + """ + Args: + data_dict: + points: (N, 3 + C_in) + gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading] + gt_names: optional, (N), string + ... 
+ + Returns: + """ + for cur_augmentor in self.data_augmentor_queue: + data_dict = cur_augmentor(data_dict=data_dict) + + data_dict['gt_boxes'][:, 6] = common_utils.limit_period( + data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi + ) + # if 'calib' in data_dict: + # data_dict.pop('calib') + if 'road_plane' in data_dict: + data_dict.pop('road_plane') + if 'gt_boxes_mask' in data_dict: + gt_boxes_mask = data_dict['gt_boxes_mask'] + data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask] + data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask] + if 'gt_boxes2d' in data_dict: + data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][gt_boxes_mask] + + data_dict.pop('gt_boxes_mask') + return data_dict diff --git a/toolbox/openpcdet/pcdet/datasets/augmentor/database_sampler.py b/toolbox/openpcdet/pcdet/datasets/augmentor/database_sampler.py new file mode 100644 index 000000000..105708a60 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/augmentor/database_sampler.py @@ -0,0 +1,502 @@ +import pickle + +import os +import copy +import numpy as np +from skimage import io +import torch +import SharedArray +import torch.distributed as dist + +from ...ops.iou3d_nms import iou3d_nms_utils +from ...utils import box_utils, common_utils, calibration_kitti +from pcdet.datasets.kitti.kitti_object_eval_python import kitti_common + +class DataBaseSampler(object): + def __init__(self, root_path, sampler_cfg, class_names, logger=None): + self.root_path = root_path + self.class_names = class_names + self.sampler_cfg = sampler_cfg + + self.img_aug_type = sampler_cfg.get('IMG_AUG_TYPE', None) + self.img_aug_iou_thresh = sampler_cfg.get('IMG_AUG_IOU_THRESH', 0.5) + + self.logger = logger + self.db_infos = {} + for class_name in class_names: + self.db_infos[class_name] = [] + + self.use_shared_memory = sampler_cfg.get('USE_SHARED_MEMORY', False) + + for db_info_path in sampler_cfg.DB_INFO_PATH: + db_info_path = self.root_path.resolve() / db_info_path + if not db_info_path.exists(): + assert len(sampler_cfg.DB_INFO_PATH) == 1 + sampler_cfg.DB_INFO_PATH[0] = sampler_cfg.BACKUP_DB_INFO['DB_INFO_PATH'] + sampler_cfg.DB_DATA_PATH[0] = sampler_cfg.BACKUP_DB_INFO['DB_DATA_PATH'] + db_info_path = self.root_path.resolve() / sampler_cfg.DB_INFO_PATH[0] + sampler_cfg.NUM_POINT_FEATURES = sampler_cfg.BACKUP_DB_INFO['NUM_POINT_FEATURES'] + + with open(str(db_info_path), 'rb') as f: + infos = pickle.load(f) + [self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names] + + for func_name, val in sampler_cfg.PREPARE.items(): + self.db_infos = getattr(self, func_name)(self.db_infos, val) + + self.gt_database_data_key = self.load_db_to_shared_memory() if self.use_shared_memory else None + + self.sample_groups = {} + self.sample_class_num = {} + self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False) + + for x in sampler_cfg.SAMPLE_GROUPS: + class_name, sample_num = x.split(':') + if class_name not in class_names: + continue + self.sample_class_num[class_name] = sample_num + self.sample_groups[class_name] = { + 'sample_num': sample_num, + 'pointer': len(self.db_infos[class_name]), + 'indices': np.arange(len(self.db_infos[class_name])) + } + + def __getstate__(self): + d = dict(self.__dict__) + del d['logger'] + return d + + def __setstate__(self, d): + self.__dict__.update(d) + + def __del__(self): + if self.use_shared_memory: + self.logger.info('Deleting GT database from shared memory') + cur_rank, num_gpus = common_utils.get_dist_info() + sa_key = self.sampler_cfg.DB_DATA_PATH[0] + if 
cur_rank % num_gpus == 0 and os.path.exists(f"/dev/shm/{sa_key}"): + SharedArray.delete(f"shm://{sa_key}") + + if num_gpus > 1: + dist.barrier() + self.logger.info('GT database has been removed from shared memory') + + def load_db_to_shared_memory(self): + self.logger.info('Loading GT database to shared memory') + cur_rank, world_size, num_gpus = common_utils.get_dist_info(return_gpu_per_machine=True) + + assert self.sampler_cfg.DB_DATA_PATH.__len__() == 1, 'Currently only a single DB_DATA file is supported' + db_data_path = self.root_path.resolve() / self.sampler_cfg.DB_DATA_PATH[0] + sa_key = self.sampler_cfg.DB_DATA_PATH[0] + + if cur_rank % num_gpus == 0 and not os.path.exists(f"/dev/shm/{sa_key}"): + gt_database_data = np.load(db_data_path) + common_utils.sa_create(f"shm://{sa_key}", gt_database_data) + + if num_gpus > 1: + dist.barrier() + self.logger.info('GT database has been saved to shared memory') + return sa_key + + def filter_by_difficulty(self, db_infos, removed_difficulty): + new_db_infos = {} + for key, dinfos in db_infos.items(): + pre_len = len(dinfos) + new_db_infos[key] = [ + info for info in dinfos + if info['difficulty'] not in removed_difficulty + ] + if self.logger is not None: + self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key]))) + return new_db_infos + + def filter_by_min_points(self, db_infos, min_gt_points_list): + for name_num in min_gt_points_list: + name, min_num = name_num.split(':') + min_num = int(min_num) + if min_num > 0 and name in db_infos.keys(): + filtered_infos = [] + for info in db_infos[name]: + if info['num_points_in_gt'] >= min_num: + filtered_infos.append(info) + + if self.logger is not None: + self.logger.info('Database filter by min points %s: %d => %d' % + (name, len(db_infos[name]), len(filtered_infos))) + db_infos[name] = filtered_infos + + return db_infos + + def sample_with_fixed_number(self, class_name, sample_group): + """ + Args: + class_name: + sample_group: + Returns: + + """ + sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices'] + # reshuffle and restart once the pointer passes the end of the class database + if pointer >= len(self.db_infos[class_name]): + indices = np.random.permutation(len(self.db_infos[class_name])) + pointer = 0 + + sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]] + pointer += sample_num + sample_group['pointer'] = pointer + sample_group['indices'] = indices + return sampled_dict + + @staticmethod + def put_boxes_on_road_planes(gt_boxes, road_planes, calib): + """ + Only validated on the KITTI dataset + Args: + gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
+ road_planes: [a, b, c, d] + calib: + + Returns: + """ + a, b, c, d = road_planes + center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3]) + # solve a*x + b*y + c*z + d = 0 for the camera-frame height y of each box center + cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b + center_cam[:, 1] = cur_height_cam + cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2] + mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height + gt_boxes[:, 2] -= mv_height # lidar view + return gt_boxes, mv_height + + def copy_paste_to_image_kitti(self, data_dict, crop_feat, gt_number, point_idxes=None): + kitti_img_aug_type = 'by_depth' + kitti_img_aug_use_type = 'annotation' + + image = data_dict['images'] + boxes3d = data_dict['gt_boxes'] + boxes2d = data_dict['gt_boxes2d'] + corners_lidar = box_utils.boxes_to_corners_3d(boxes3d) + if 'depth' in kitti_img_aug_type: + paste_order = boxes3d[:,0].argsort() + paste_order = paste_order[::-1] + else: + paste_order = np.arange(len(boxes3d), dtype=np.int64) + + if 'reverse' in kitti_img_aug_type: + paste_order = paste_order[::-1] + + paste_mask = -255 * np.ones(image.shape[:2], dtype=np.int64) + fg_mask = np.zeros(image.shape[:2], dtype=np.int64) + overlap_mask = np.zeros(image.shape[:2], dtype=np.int64) + depth_mask = np.zeros((*image.shape[:2], 2), dtype=np.float64) + points_2d, depth_2d = data_dict['calib'].lidar_to_img(data_dict['points'][:,:3]) + points_2d[:,0] = np.clip(points_2d[:,0], a_min=0, a_max=image.shape[1]-1) + points_2d[:,1] = np.clip(points_2d[:,1], a_min=0, a_max=image.shape[0]-1) + points_2d = points_2d.astype(np.int64) + for _order in paste_order: + _box2d = boxes2d[_order] + image[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2]] = crop_feat[_order] + overlap_mask[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2]] += \ + (paste_mask[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2]] > 0).astype(np.int64) + paste_mask[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2]] = _order + + if 'cover' in kitti_img_aug_use_type: + # HxWx2 for min and max depth of each box region + depth_mask[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2],0] = corners_lidar[_order,:,0].min() + depth_mask[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2],1] = corners_lidar[_order,:,0].max() + + # foreground area of original point cloud in image plane + if _order < gt_number: + fg_mask[_box2d[1]:_box2d[3],_box2d[0]:_box2d[2]] = 1 + + data_dict['images'] = image + + new_mask = paste_mask[points_2d[:,1], points_2d[:,0]]==(point_idxes+gt_number) + if False: # self.keep_raw: + raw_mask = (point_idxes == -1) + else: + raw_fg = (fg_mask == 1) & (paste_mask >= 0) & (paste_mask < gt_number) + raw_bg = (fg_mask == 0) & (paste_mask < 0) + raw_mask = raw_fg[points_2d[:,1], points_2d[:,0]] | raw_bg[points_2d[:,1], points_2d[:,0]] + keep_mask = new_mask | raw_mask + data_dict['points_2d'] = points_2d + + if 'annotation' in kitti_img_aug_use_type: + data_dict['points'] = data_dict['points'][keep_mask] + data_dict['points_2d'] = data_dict['points_2d'][keep_mask] + elif 'projection' in kitti_img_aug_use_type: + overlap_mask[overlap_mask>=1] = 1 + data_dict['overlap_mask'] = overlap_mask + if 'cover' in kitti_img_aug_use_type: + data_dict['depth_mask'] = depth_mask + + return data_dict + + def collect_image_crops_kitti(self, info, data_dict, obj_points, sampled_gt_boxes, sampled_gt_boxes2d, idx): + calib_file = kitti_common.get_calib_path(int(info['image_idx']), self.root_path, relative_path=False) + sampled_calib = calibration_kitti.Calibration(calib_file) + points_2d, depth_2d = sampled_calib.lidar_to_img(obj_points[:,:3]) + + if True: # self.point_refine: + # align calibration metrics for points + points_rect = data_dict['calib'].img_to_rect(points_2d[:,0], points_2d[:,1], depth_2d) + points_lidar = data_dict['calib'].rect_to_lidar(points_rect) + obj_points[:, :3] = points_lidar + # align calibration metrics for boxes + box3d_raw = sampled_gt_boxes[idx].reshape(1,-1) + box3d_coords = box_utils.boxes_to_corners_3d(box3d_raw)[0] + box3d_box, box3d_depth = sampled_calib.lidar_to_img(box3d_coords) + box3d_coord_rect = data_dict['calib'].img_to_rect(box3d_box[:,0], box3d_box[:,1], box3d_depth) + box3d_rect = box_utils.corners_rect_to_camera(box3d_coord_rect).reshape(1,-1) + box3d_lidar = box_utils.boxes3d_kitti_camera_to_lidar(box3d_rect, data_dict['calib']) + box2d = box_utils.boxes3d_kitti_camera_to_imageboxes(box3d_rect, data_dict['calib'], + data_dict['images'].shape[:2]) + sampled_gt_boxes[idx] = box3d_lidar[0] + sampled_gt_boxes2d[idx] = box2d[0] + + obj_idx = idx * np.ones(len(obj_points), dtype=np.int64) + + # copy crops from images + img_path = self.root_path / f'training/image_2/{info["image_idx"]}.png' + raw_image = io.imread(img_path) + raw_image = raw_image.astype(np.float32) + raw_center = info['bbox'].reshape(2,2).mean(0) + new_box = sampled_gt_boxes2d[idx].astype(np.int64) + new_shape = np.array([new_box[2]-new_box[0], new_box[3]-new_box[1]]) + raw_box = np.concatenate([raw_center-new_shape/2, raw_center+new_shape/2]).astype(np.int64) + raw_box[0::2] = np.clip(raw_box[0::2], a_min=0, a_max=raw_image.shape[1]) + raw_box[1::2] = np.clip(raw_box[1::2], a_min=0, a_max=raw_image.shape[0]) + if (raw_box[2]-raw_box[0])!=new_shape[0] or (raw_box[3]-raw_box[1])!=new_shape[1]: + new_center = new_box.reshape(2,2).mean(0) + new_shape = np.array([raw_box[2]-raw_box[0], raw_box[3]-raw_box[1]]) + new_box = np.concatenate([new_center-new_shape/2, new_center+new_shape/2]).astype(np.int64) + + img_crop2d = raw_image[raw_box[1]:raw_box[3],raw_box[0]:raw_box[2]] / 255 + + return new_box, img_crop2d, obj_points, obj_idx + + def sample_gt_boxes_2d_kitti(self, data_dict, sampled_boxes, valid_mask): + mv_height = None + # filter out sampled boxes whose 2D IoU with existing or other sampled boxes exceeds the threshold + if self.sampler_cfg.get('USE_ROAD_PLANE', False): + sampled_boxes, mv_height = self.put_boxes_on_road_planes( + sampled_boxes, data_dict['road_plane'], data_dict['calib'] + ) + + boxes3d_camera = box_utils.boxes3d_lidar_to_kitti_camera(sampled_boxes, data_dict['calib']) + sampled_boxes2d = box_utils.boxes3d_kitti_camera_to_imageboxes(boxes3d_camera, data_dict['calib'], + data_dict['images'].shape[:2]) + sampled_boxes2d = torch.Tensor(sampled_boxes2d) + existed_boxes2d = torch.Tensor(data_dict['gt_boxes2d']) + iou2d1 = box_utils.pairwise_iou(sampled_boxes2d, existed_boxes2d).cpu().numpy() + iou2d2 = box_utils.pairwise_iou(sampled_boxes2d, sampled_boxes2d).cpu().numpy() + iou2d2[range(sampled_boxes2d.shape[0]), range(sampled_boxes2d.shape[0])] = 0 + iou2d1 = iou2d1 if iou2d1.shape[1] > 0 else iou2d2 + + ret_valid_mask = ((iou2d1.max(axis=1) min_time - 1e-6) + obj_points = obj_points[time_mask] + + large_sampled_gt_boxes = box_utils.enlarge_box3d( + sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH + ) + points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes) + points = np.concatenate([obj_points[:, :points.shape[-1]], points], axis=0) + gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0) + gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0) +
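+ # the sampled objects are now merged into the scene: raw points inside the
+ # (slightly enlarged) sampled boxes were dropped and replaced by object points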
data_dict['gt_boxes'] = gt_boxes + data_dict['gt_names'] = gt_names + data_dict['points'] = points + + if self.img_aug_type is not None: + data_dict = self.copy_paste_to_image(img_aug_gt_dict, data_dict, points) + + return data_dict + + def __call__(self, data_dict): + """ + Args: + data_dict: + gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + + Returns: + + """ + gt_boxes = data_dict['gt_boxes'] + gt_names = data_dict['gt_names'].astype(str) + existed_boxes = gt_boxes + total_valid_sampled_dict = [] + sampled_mv_height = [] + sampled_gt_boxes2d = [] + + for class_name, sample_group in self.sample_groups.items(): + if self.limit_whole_scene: + num_gt = np.sum(class_name == gt_names) + sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt) + if int(sample_group['sample_num']) > 0: + sampled_dict = self.sample_with_fixed_number(class_name, sample_group) + + sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32) + + assert not self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False), 'Please use the latest code to generate GT_DATABASE' + + # keep only sampled boxes that overlap neither the existing boxes nor each other in BEV + iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7]) + iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7]) + iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0 + iou1 = iou1 if iou1.shape[1] > 0 else iou2 + valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0) + + if self.img_aug_type is not None: + sampled_boxes2d, mv_height, valid_mask = self.sample_gt_boxes_2d(data_dict, sampled_boxes, valid_mask) + sampled_gt_boxes2d.append(sampled_boxes2d) + if mv_height is not None: + sampled_mv_height.append(mv_height) + + valid_mask = valid_mask.nonzero()[0] + valid_sampled_dict = [sampled_dict[x] for x in valid_mask] + valid_sampled_boxes = sampled_boxes[valid_mask] + + existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes[:, :existed_boxes.shape[-1]]), axis=0) + total_valid_sampled_dict.extend(valid_sampled_dict) + + sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :] + + if total_valid_sampled_dict.__len__() > 0: + sampled_gt_boxes2d = np.concatenate(sampled_gt_boxes2d, axis=0) if len(sampled_gt_boxes2d) > 0 else None + sampled_mv_height = np.concatenate(sampled_mv_height, axis=0) if len(sampled_mv_height) > 0 else None + + data_dict = self.add_sampled_boxes_to_scene( + data_dict, sampled_gt_boxes, total_valid_sampled_dict, sampled_mv_height, sampled_gt_boxes2d + ) + + data_dict.pop('gt_boxes_mask') + return data_dict diff --git a/toolbox/openpcdet/pcdet/datasets/custom/__init__.py b/toolbox/openpcdet/pcdet/datasets/custom/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/custom/custom_dataset.py b/toolbox/openpcdet/pcdet/datasets/custom/custom_dataset.py new file mode 100644 index 000000000..3715210b1 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/custom/custom_dataset.py @@ -0,0 +1,283 @@ +import copy +import pickle +import os +from pathlib import Path + +import numpy as np + +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import box_utils, common_utils +from ..dataset import DatasetTemplate + + +class CustomDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path,
logger=logger + ) + self.split = self.dataset_cfg.DATA_SPLIT[self.mode] + + split_dir = os.path.join(self.root_path, 'ImageSets', (self.split + '.txt')) + self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if os.path.exists(split_dir) else None + + self.custom_infos = [] + self.include_data(self.mode) + self.map_class_to_kitti = self.dataset_cfg.MAP_CLASS_TO_KITTI + + def include_data(self, mode): + self.logger.info('Loading Custom dataset.') + custom_infos = [] + + for info_path in self.dataset_cfg.INFO_PATH[mode]: + info_path = self.root_path / info_path + if not info_path.exists(): + continue + with open(info_path, 'rb') as f: + infos = pickle.load(f) + custom_infos.extend(infos) + + self.custom_infos.extend(custom_infos) + self.logger.info('Total samples for CUSTOM dataset: %d' % (len(custom_infos))) + + def get_label(self, idx): + label_file = self.root_path / 'labels' / ('%s.txt' % idx) + assert label_file.exists() + with open(label_file, 'r') as f: + lines = f.readlines() + + # [N, 8]: (x y z dx dy dz heading_angle category_id) + gt_boxes = [] + gt_names = [] + for line in lines: + line_list = line.strip().split(' ') + gt_boxes.append(line_list[:-1]) + gt_names.append(line_list[-1]) + + return np.array(gt_boxes, dtype=np.float32), np.array(gt_names) + + def get_lidar(self, idx): + lidar_file = self.root_path / 'points' / ('%s.npy' % idx) + assert lidar_file.exists() + point_features = np.load(lidar_file) + return point_features + + def set_split(self, split): + super().__init__( + dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, + root_path=self.root_path, logger=self.logger + ) + self.split = split + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None + + def __len__(self): + if self._merge_all_iters_to_one_epoch: + return len(self.sample_id_list) * self.total_epochs + + return len(self.custom_infos) + + def __getitem__(self, index): + if self._merge_all_iters_to_one_epoch: + index = index % len(self.custom_infos) + + info = copy.deepcopy(self.custom_infos[index]) + sample_idx = info['point_cloud']['lidar_idx'] + points = self.get_lidar(sample_idx) + input_dict = { + 'frame_id': self.sample_id_list[index], + 'points': points + } + + if 'annos' in info: + annos = info['annos'] + annos = common_utils.drop_info_with_name(annos, name='DontCare') + gt_names = annos['name'] + gt_boxes_lidar = annos['gt_boxes_lidar'] + input_dict.update({ + 'gt_names': gt_names, + 'gt_boxes': gt_boxes_lidar + }) + + data_dict = self.prepare_data(data_dict=input_dict) + + return data_dict + + def evaluation(self, det_annos, class_names, **kwargs): + if 'annos' not in self.custom_infos[0].keys(): + return 'No ground-truth boxes for evaluation', {} + + def kitti_eval(eval_det_annos, eval_gt_annos, map_name_to_kitti): + from ..kitti.kitti_object_eval_python import eval as kitti_eval + from ..kitti import kitti_utils + + kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti) + kitti_utils.transform_annotations_to_kitti_format( + eval_gt_annos, map_name_to_kitti=map_name_to_kitti, + info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False) + ) + kitti_class_names = [map_name_to_kitti[x] for x in class_names] + ap_result_str, ap_dict = kitti_eval.get_official_eval_result( + gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names + ) + return 
ap_result_str, ap_dict + + eval_det_annos = copy.deepcopy(det_annos) + eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.custom_infos] + + if kwargs['eval_metric'] == 'kitti': + ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos, self.map_class_to_kitti) + else: + raise NotImplementedError + + return ap_result_str, ap_dict + + def get_infos(self, class_names, num_workers=4, has_label=True, sample_id_list=None, num_features=4): + import concurrent.futures as futures + + def process_single_scene(sample_idx): + print('%s sample_idx: %s' % (self.split, sample_idx)) + info = {} + pc_info = {'num_features': num_features, 'lidar_idx': sample_idx} + info['point_cloud'] = pc_info + + if has_label: + annotations = {} + gt_boxes_lidar, name = self.get_label(sample_idx) + annotations['name'] = name + annotations['gt_boxes_lidar'] = gt_boxes_lidar[:, :7] + info['annos'] = annotations + + return info + + sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list + + # use a thread pool to speed up info generation + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) + + def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'): + import torch + + database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split)) + db_info_save_path = Path(self.root_path) / ('custom_dbinfos_%s.pkl' % split) + + database_save_path.mkdir(parents=True, exist_ok=True) + all_db_infos = {} + + with open(info_path, 'rb') as f: + infos = pickle.load(f) + + for k in range(len(infos)): + print('gt_database sample: %d/%d' % (k + 1, len(infos))) + info = infos[k] + sample_idx = info['point_cloud']['lidar_idx'] + points = self.get_lidar(sample_idx) + annos = info['annos'] + names = annos['name'] + gt_boxes = annos['gt_boxes_lidar'] + + num_obj = gt_boxes.shape[0] + point_indices = roiaware_pool3d_utils.points_in_boxes_cpu( + torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes) + ).numpy() # (nboxes, npoints) + + for i in range(num_obj): + filename = '%s_%s_%d.bin' % (sample_idx, names[i], i) + filepath = database_save_path / filename + gt_points = points[point_indices[i] > 0] + + # shift object points into the box-local frame before saving + gt_points[:, :3] -= gt_boxes[i, :3] + with open(filepath, 'wb') as f: + gt_points.tofile(f) + + if (used_classes is None) or names[i] in used_classes: + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': names[i], 'path': db_path, 'gt_idx': i, + 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]} + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + + # report the number of database samples per class + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + @staticmethod + def create_label_file_with_name_and_box(class_names, gt_names, gt_boxes, save_label_path): + with open(save_label_path, 'w') as f: + for idx in range(gt_boxes.shape[0]): + boxes = gt_boxes[idx] + name = gt_names[idx] + if name not in class_names: + continue + line = "{x} {y} {z} {l} {w} {h} {angle} {name}\n".format( + x=boxes[0], y=boxes[1], z=boxes[2], l=boxes[3], + w=boxes[4], h=boxes[5], angle=boxes[6], name=name + ) + f.write(line) + + +def create_custom_infos(dataset_cfg, class_names, data_path, save_path, workers=4): + dataset = CustomDataset( +
dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, + training=False, logger=common_utils.create_logger() + ) + train_split, val_split = 'train', 'val' + num_features = len(dataset_cfg.POINT_FEATURE_ENCODING.src_feature_list) + + train_filename = save_path / ('custom_infos_%s.pkl' % train_split) + val_filename = save_path / ('custom_infos_%s.pkl' % val_split) + + print('------------------------Start to generate data infos------------------------') + + dataset.set_split(train_split) + custom_infos_train = dataset.get_infos( + class_names, num_workers=workers, has_label=True, num_features=num_features + ) + with open(train_filename, 'wb') as f: + pickle.dump(custom_infos_train, f) + print('Custom info train file is saved to %s' % train_filename) + + dataset.set_split(val_split) + custom_infos_val = dataset.get_infos( + class_names, num_workers=workers, has_label=True, num_features=num_features + ) + with open(val_filename, 'wb') as f: + pickle.dump(custom_infos_val, f) + print('Custom info val file is saved to %s' % val_filename) + + print('------------------------Start create groundtruth database for data augmentation------------------------') + dataset.set_split(train_split) + dataset.create_groundtruth_database(train_filename, split=train_split) + print('------------------------Data preparation done------------------------') + + +if __name__ == '__main__': + import sys + + if sys.argv.__len__() > 1 and sys.argv[1] == 'create_custom_infos': + import yaml + from pathlib import Path + from easydict import EasyDict + + dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2]))) + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + create_custom_infos( + dataset_cfg=dataset_cfg, + class_names=['Vehicle', 'Pedestrian', 'Cyclist'], + data_path=ROOT_DIR / 'data' / 'custom', + save_path=ROOT_DIR / 'data' / 'custom', + ) diff --git a/toolbox/openpcdet/pcdet/datasets/dataset.py b/toolbox/openpcdet/pcdet/datasets/dataset.py new file mode 100644 index 000000000..c1a7f6b03 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/dataset.py @@ -0,0 +1,325 @@ +from collections import defaultdict +from pathlib import Path + +import numpy as np +import torch +import torch.utils.data as torch_data + +from ..utils import common_utils +from .augmentor.data_augmentor import DataAugmentor +from .processor.data_processor import DataProcessor +from .processor.point_feature_encoder import PointFeatureEncoder + + +class DatasetTemplate(torch_data.Dataset): + def __init__(self, dataset_cfg=None, class_names=None, training=True, root_path=None, logger=None): + super().__init__() + self.dataset_cfg = dataset_cfg + self.training = training + self.class_names = class_names + self.logger = logger + self.root_path = root_path if root_path is not None else Path(self.dataset_cfg.DATA_PATH) + if self.dataset_cfg is None or class_names is None: + return + + self.point_cloud_range = np.array(self.dataset_cfg.POINT_CLOUD_RANGE, dtype=np.float32) + self.point_feature_encoder = PointFeatureEncoder( + self.dataset_cfg.POINT_FEATURE_ENCODING, + point_cloud_range=self.point_cloud_range + ) + self.data_augmentor = DataAugmentor( + self.root_path, self.dataset_cfg.DATA_AUGMENTOR, self.class_names, logger=self.logger + ) if self.training else None + self.data_processor = DataProcessor( + self.dataset_cfg.DATA_PROCESSOR, point_cloud_range=self.point_cloud_range, + training=self.training, num_point_features=self.point_feature_encoder.num_point_features + ) + + self.grid_size =
self.data_processor.grid_size + self.voxel_size = self.data_processor.voxel_size + self.total_epochs = 0 + self._merge_all_iters_to_one_epoch = False + + if hasattr(self.data_processor, "depth_downsample_factor"): + self.depth_downsample_factor = self.data_processor.depth_downsample_factor + else: + self.depth_downsample_factor = None + + @property + def mode(self): + return 'train' if self.training else 'test' + + def __getstate__(self): + d = dict(self.__dict__) + del d['logger'] + return d + + def __setstate__(self, d): + self.__dict__.update(d) + + def generate_prediction_dicts(self, batch_dict, pred_dicts, class_names, output_path=None): + """ + Args: + batch_dict: + frame_id: + pred_dicts: list of pred_dicts + pred_boxes: (N, 7 or 9), Tensor + pred_scores: (N), Tensor + pred_labels: (N), Tensor + class_names: + output_path: + + Returns: + + """ + + def get_template_prediction(num_samples): + box_dim = 9 if self.dataset_cfg.get('TRAIN_WITH_SPEED', False) else 7 + ret_dict = { + 'name': np.zeros(num_samples), 'score': np.zeros(num_samples), + 'boxes_lidar': np.zeros([num_samples, box_dim]), 'pred_labels': np.zeros(num_samples) + } + return ret_dict + + def generate_single_sample_dict(box_dict): + pred_scores = box_dict['pred_scores'].cpu().numpy() + pred_boxes = box_dict['pred_boxes'].cpu().numpy() + pred_labels = box_dict['pred_labels'].cpu().numpy() + pred_dict = get_template_prediction(pred_scores.shape[0]) + if pred_scores.shape[0] == 0: + return pred_dict + + pred_dict['name'] = np.array(class_names)[pred_labels - 1] + pred_dict['score'] = pred_scores + pred_dict['boxes_lidar'] = pred_boxes + pred_dict['pred_labels'] = pred_labels + + return pred_dict + + annos = [] + for index, box_dict in enumerate(pred_dicts): + single_pred_dict = generate_single_sample_dict(box_dict) + single_pred_dict['frame_id'] = batch_dict['frame_id'][index] + if 'metadata' in batch_dict: + single_pred_dict['metadata'] = batch_dict['metadata'][index] + annos.append(single_pred_dict) + + return annos + + def merge_all_iters_to_one_epoch(self, merge=True, epochs=None): + if merge: + self._merge_all_iters_to_one_epoch = True + self.total_epochs = epochs + else: + self._merge_all_iters_to_one_epoch = False + + def __len__(self): + raise NotImplementedError + + def __getitem__(self, index): + """ + To support a custom dataset, implement this function to load the raw data (and labels), then transform them to + the unified normative coordinate and call the function self.prepare_data() to process the data and send them + to the model. + + Args: + index: + + Returns: + + """ + raise NotImplementedError + + def set_lidar_aug_matrix(self, data_dict): + """ + Get lidar augment matrix (4 x 4), which are used to recover orig point coordinates. 
+ """ + lidar_aug_matrix = np.eye(4) + if 'flip_y' in data_dict.keys(): + flip_x = data_dict['flip_x'] + flip_y = data_dict['flip_y'] + if flip_x: + lidar_aug_matrix[:3,:3] = np.array([[1, 0, 0], [0, -1, 0], [0, 0, 1]]) @ lidar_aug_matrix[:3,:3] + if flip_y: + lidar_aug_matrix[:3,:3] = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]) @ lidar_aug_matrix[:3,:3] + if 'noise_rot' in data_dict.keys(): + noise_rot = data_dict['noise_rot'] + lidar_aug_matrix[:3,:3] = common_utils.angle2matrix(torch.tensor(noise_rot)) @ lidar_aug_matrix[:3,:3] + if 'noise_scale' in data_dict.keys(): + noise_scale = data_dict['noise_scale'] + lidar_aug_matrix[:3,:3] *= noise_scale + if 'noise_translate' in data_dict.keys(): + noise_translate = data_dict['noise_translate'] + lidar_aug_matrix[:3,3:4] = noise_translate.T + data_dict['lidar_aug_matrix'] = lidar_aug_matrix + return data_dict + + def prepare_data(self, data_dict): + """ + Args: + data_dict: + points: optional, (N, 3 + C_in) + gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + gt_names: optional, (N), string + ... + + Returns: + data_dict: + frame_id: string + points: (N, 3 + C_in) + gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + gt_names: optional, (N), string + use_lead_xyz: bool + voxels: optional (num_voxels, max_points_per_voxel, 3 + C) + voxel_coords: optional (num_voxels, 3) + voxel_num_points: optional (num_voxels) + ... + """ + if self.training: + assert 'gt_boxes' in data_dict, 'gt_boxes should be provided for training' + gt_boxes_mask = np.array([n in self.class_names for n in data_dict['gt_names']], dtype=np.bool_) + + if 'calib' in data_dict: + calib = data_dict['calib'] + data_dict = self.data_augmentor.forward( + data_dict={ + **data_dict, + 'gt_boxes_mask': gt_boxes_mask + } + ) + if 'calib' in data_dict: + data_dict['calib'] = calib + data_dict = self.set_lidar_aug_matrix(data_dict) + if data_dict.get('gt_boxes', None) is not None: + selected = common_utils.keep_arrays_by_name(data_dict['gt_names'], self.class_names) + data_dict['gt_boxes'] = data_dict['gt_boxes'][selected] + data_dict['gt_names'] = data_dict['gt_names'][selected] + gt_classes = np.array([self.class_names.index(n) + 1 for n in data_dict['gt_names']], dtype=np.int32) + gt_boxes = np.concatenate((data_dict['gt_boxes'], gt_classes.reshape(-1, 1).astype(np.float32)), axis=1) + data_dict['gt_boxes'] = gt_boxes + + if data_dict.get('gt_boxes2d', None) is not None: + data_dict['gt_boxes2d'] = data_dict['gt_boxes2d'][selected] + + if data_dict.get('points', None) is not None: + data_dict = self.point_feature_encoder.forward(data_dict) + + data_dict = self.data_processor.forward( + data_dict=data_dict + ) + + if self.training and len(data_dict['gt_boxes']) == 0: + new_index = np.random.randint(self.__len__()) + return self.__getitem__(new_index) + + data_dict.pop('gt_names', None) + + return data_dict + + @staticmethod + def collate_batch(batch_list, _unused=False): + data_dict = defaultdict(list) + for cur_sample in batch_list: + for key, val in cur_sample.items(): + data_dict[key].append(val) + batch_size = len(batch_list) + ret = {} + batch_size_ratio = 1 + + for key, val in data_dict.items(): + try: + if key in ['voxels', 'voxel_num_points']: + if isinstance(val[0], list): + batch_size_ratio = len(val[0]) + val = [i for item in val for i in item] + ret[key] = np.concatenate(val, axis=0) + elif key in ['points', 'voxel_coords']: + coors = [] + if isinstance(val[0], list): + val = [i for item in val for i in item] + for i, coor in 
enumerate(val): + # prepend the batch index as an extra leading column on each coordinate row + coor_pad = np.pad(coor, ((0, 0), (1, 0)), mode='constant', constant_values=i) + coors.append(coor_pad) + ret[key] = np.concatenate(coors, axis=0) + elif key in ['gt_boxes']: + max_gt = max([len(x) for x in val]) + batch_gt_boxes3d = np.zeros((batch_size, max_gt, val[0].shape[-1]), dtype=np.float32) + for k in range(batch_size): + batch_gt_boxes3d[k, :val[k].__len__(), :] = val[k] + ret[key] = batch_gt_boxes3d + + elif key in ['roi_boxes']: + max_gt = max([x.shape[1] for x in val]) + batch_gt_boxes3d = np.zeros((batch_size, val[0].shape[0], max_gt, val[0].shape[-1]), dtype=np.float32) + for k in range(batch_size): + batch_gt_boxes3d[k,:, :val[k].shape[1], :] = val[k] + ret[key] = batch_gt_boxes3d + + elif key in ['roi_scores', 'roi_labels']: + max_gt = max([x.shape[1] for x in val]) + batch_gt_boxes3d = np.zeros((batch_size, val[0].shape[0], max_gt), dtype=np.float32) + for k in range(batch_size): + batch_gt_boxes3d[k,:, :val[k].shape[1]] = val[k] + ret[key] = batch_gt_boxes3d + + elif key in ['gt_boxes2d']: + max_boxes = max([len(x) for x in val]) + batch_boxes2d = np.zeros((batch_size, max_boxes, val[0].shape[-1]), dtype=np.float32) + for k in range(batch_size): + if val[k].size > 0: + batch_boxes2d[k, :val[k].__len__(), :] = val[k] + ret[key] = batch_boxes2d + elif key in ["images", "depth_maps"]: + # Get largest image size (H, W) + max_h = 0 + max_w = 0 + for image in val: + max_h = max(max_h, image.shape[0]) + max_w = max(max_w, image.shape[1]) + + # Change size of images + images = [] + for image in val: + pad_h = common_utils.get_pad_params(desired_size=max_h, cur_size=image.shape[0]) + pad_w = common_utils.get_pad_params(desired_size=max_w, cur_size=image.shape[1]) + pad_value = 0 + + if key == "images": + pad_width = (pad_h, pad_w, (0, 0)) + elif key == "depth_maps": + pad_width = (pad_h, pad_w) + + image_pad = np.pad(image, + pad_width=pad_width, + mode='constant', + constant_values=pad_value) + + images.append(image_pad) + ret[key] = np.stack(images, axis=0) + elif key in ['calib']: + ret[key] = val + elif key in ["points_2d"]: + max_len = max([len(_val) for _val in val]) + pad_value = 0 + points = [] + for _points in val: + pad_width = ((0, max_len-len(_points)), (0,0)) + points_pad = np.pad(_points, + pad_width=pad_width, + mode='constant', + constant_values=pad_value) + points.append(points_pad) + ret[key] = np.stack(points, axis=0) + elif key in ['camera_imgs']: + ret[key] = torch.stack([torch.stack(imgs,dim=0) for imgs in val],dim=0) + else: + ret[key] = np.stack(val, axis=0) + except: + print('Error in collate_batch: key=%s' % key) + raise TypeError + + ret['batch_size'] = batch_size * batch_size_ratio + return ret diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/__init__.py b/toolbox/openpcdet/pcdet/datasets/kitti/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_dataset.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_dataset.py new file mode 100644 index 000000000..411bd7509 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_dataset.py @@ -0,0 +1,484 @@ +import copy +import pickle +from pathlib import Path + +import numpy as np +from skimage import io + +from .
import kitti_utils +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import box_utils, calibration_kitti, common_utils, object3d_kitti +from ..dataset import DatasetTemplate + + +class KittiDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.split = self.dataset_cfg.DATA_SPLIT[self.mode] + self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing') + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None + + self.kitti_infos = [] + self.include_kitti_data(self.mode) + + def include_kitti_data(self, mode): + if self.logger is not None: + self.logger.info('Loading KITTI dataset') + kitti_infos = [] + + for info_path in self.dataset_cfg.INFO_PATH[mode]: + info_path = self.root_path / info_path + if not info_path.exists(): + continue + with open(info_path, 'rb') as f: + infos = pickle.load(f) + kitti_infos.extend(infos) + + self.kitti_infos.extend(kitti_infos) + + if self.logger is not None: + self.logger.info('Total samples for KITTI dataset: %d' % (len(kitti_infos))) + + def set_split(self, split): + super().__init__( + dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger + ) + self.split = split + self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing') + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None + + def get_lidar(self, idx): + lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx) + assert lidar_file.exists() + return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4) + + def get_image(self, idx): + """ + Loads image for a sample + Args: + idx: int, Sample index + Returns: + image: (H, W, 3), RGB Image + """ + img_file = self.root_split_path / 'image_2' / ('%s.png' % idx) + assert img_file.exists() + image = io.imread(img_file) + image = image.astype(np.float32) + image /= 255.0 + return image + + def get_image_shape(self, idx): + img_file = self.root_split_path / 'image_2' / ('%s.png' % idx) + assert img_file.exists() + return np.array(io.imread(img_file).shape[:2], dtype=np.int32) + + def get_label(self, idx): + label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx) + assert label_file.exists() + return object3d_kitti.get_objects_from_label(label_file) + + def get_depth_map(self, idx): + """ + Loads depth map for a sample + Args: + idx: str, Sample index + Returns: + depth: (H, W), Depth map + """ + depth_file = self.root_split_path / 'depth_2' / ('%s.png' % idx) + assert depth_file.exists() + depth = io.imread(depth_file) + depth = depth.astype(np.float32) + depth /= 256.0 + return depth + + def get_calib(self, idx): + calib_file = self.root_split_path / 'calib' / ('%s.txt' % idx) + assert calib_file.exists() + return calibration_kitti.Calibration(calib_file) + + def get_road_plane(self, idx): + plane_file = self.root_split_path / 'planes' / ('%s.txt' % idx) + if not plane_file.exists(): + return None + + with open(plane_file, 'r') as f: + lines = f.readlines() + 
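+ # the plane is stored as the coefficients (a, b, c, d) of a*x + b*y + c*z + d = 0
+ # on the fourth line of the file, in rectified camera coordinates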
lines = [float(i) for i in lines[3].split()] + plane = np.asarray(lines) + + # Ensure normal is always facing up, this is in the rectified camera coordinate + if plane[1] > 0: + plane = -plane + + norm = np.linalg.norm(plane[0:3]) + plane = plane / norm + return plane + + @staticmethod + def get_fov_flag(pts_rect, img_shape, calib): + """ + Args: + pts_rect: + img_shape: + calib: + + Returns: + + """ + pts_img, pts_rect_depth = calib.rect_to_img(pts_rect) + val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1]) + val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0]) + val_flag_merge = np.logical_and(val_flag_1, val_flag_2) + pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0) + + return pts_valid_flag + + def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None): + import concurrent.futures as futures + + def process_single_scene(sample_idx): + print('%s sample_idx: %s' % (self.split, sample_idx)) + info = {} + pc_info = {'num_features': 4, 'lidar_idx': sample_idx} + info['point_cloud'] = pc_info + + image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)} + info['image'] = image_info + calib = self.get_calib(sample_idx) + + P2 = np.concatenate([calib.P2, np.array([[0., 0., 0., 1.]])], axis=0) + R0_4x4 = np.zeros([4, 4], dtype=calib.R0.dtype) + R0_4x4[3, 3] = 1. + R0_4x4[:3, :3] = calib.R0 + V2C_4x4 = np.concatenate([calib.V2C, np.array([[0., 0., 0., 1.]])], axis=0) + calib_info = {'P2': P2, 'R0_rect': R0_4x4, 'Tr_velo_to_cam': V2C_4x4} + + info['calib'] = calib_info + + if has_label: + obj_list = self.get_label(sample_idx) + annotations = {} + annotations['name'] = np.array([obj.cls_type for obj in obj_list]) + annotations['truncated'] = np.array([obj.truncation for obj in obj_list]) + annotations['occluded'] = np.array([obj.occlusion for obj in obj_list]) + annotations['alpha'] = np.array([obj.alpha for obj in obj_list]) + annotations['bbox'] = np.concatenate([obj.box2d.reshape(1, 4) for obj in obj_list], axis=0) + annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list]) # lhw(camera) format + annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list], axis=0) + annotations['rotation_y'] = np.array([obj.ry for obj in obj_list]) + annotations['score'] = np.array([obj.score for obj in obj_list]) + annotations['difficulty'] = np.array([obj.level for obj in obj_list], np.int32) + + num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare']) + num_gt = len(annotations['name']) + index = list(range(num_objects)) + [-1] * (num_gt - num_objects) + annotations['index'] = np.array(index, dtype=np.int32) + + loc = annotations['location'][:num_objects] + dims = annotations['dimensions'][:num_objects] + rots = annotations['rotation_y'][:num_objects] + loc_lidar = calib.rect_to_lidar(loc) + l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3] + loc_lidar[:, 2] += h[:, 0] / 2 + gt_boxes_lidar = np.concatenate([loc_lidar, l, w, h, -(np.pi / 2 + rots[..., np.newaxis])], axis=1) + annotations['gt_boxes_lidar'] = gt_boxes_lidar + + info['annos'] = annotations + + if count_inside_pts: + points = self.get_lidar(sample_idx) + calib = self.get_calib(sample_idx) + pts_rect = calib.lidar_to_rect(points[:, 0:3]) + + fov_flag = self.get_fov_flag(pts_rect, info['image']['image_shape'], calib) + pts_fov = points[fov_flag] + corners_lidar = box_utils.boxes_to_corners_3d(gt_boxes_lidar) + num_points_in_gt = 
-np.ones(num_gt, dtype=np.int32) + + for k in range(num_objects): + flag = box_utils.in_hull(pts_fov[:, 0:3], corners_lidar[k]) + num_points_in_gt[k] = flag.sum() + annotations['num_points_in_gt'] = num_points_in_gt + + return info + + sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_scene, sample_id_list) + return list(infos) + + def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'): + import torch + + database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split)) + db_info_save_path = Path(self.root_path) / ('kitti_dbinfos_%s.pkl' % split) + + database_save_path.mkdir(parents=True, exist_ok=True) + all_db_infos = {} + + with open(info_path, 'rb') as f: + infos = pickle.load(f) + + for k in range(len(infos)): + print('gt_database sample: %d/%d' % (k + 1, len(infos))) + info = infos[k] + sample_idx = info['point_cloud']['lidar_idx'] + points = self.get_lidar(sample_idx) + annos = info['annos'] + names = annos['name'] + difficulty = annos['difficulty'] + bbox = annos['bbox'] + gt_boxes = annos['gt_boxes_lidar'] + + num_obj = gt_boxes.shape[0] + point_indices = roiaware_pool3d_utils.points_in_boxes_cpu( + torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes) + ).numpy() # (nboxes, npoints) + + for i in range(num_obj): + filename = '%s_%s_%d.bin' % (sample_idx, names[i], i) + filepath = database_save_path / filename + gt_points = points[point_indices[i] > 0] + + gt_points[:, :3] -= gt_boxes[i, :3] + with open(filepath, 'wb') as f: + gt_points.tofile(f) + + if (used_classes is None) or names[i] in used_classes: + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i, + 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0], + 'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]} + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + @staticmethod + def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None): + """ + Args: + batch_dict: + frame_id: + pred_dicts: list of pred_dicts + pred_boxes: (N, 7), Tensor + pred_scores: (N), Tensor + pred_labels: (N), Tensor + class_names: + output_path: + + Returns: + + """ + def get_template_prediction(num_samples): + ret_dict = { + 'name': np.zeros(num_samples), 'truncated': np.zeros(num_samples), + 'occluded': np.zeros(num_samples), 'alpha': np.zeros(num_samples), + 'bbox': np.zeros([num_samples, 4]), 'dimensions': np.zeros([num_samples, 3]), + 'location': np.zeros([num_samples, 3]), 'rotation_y': np.zeros(num_samples), + 'score': np.zeros(num_samples), 'boxes_lidar': np.zeros([num_samples, 7]) + } + return ret_dict + + def generate_single_sample_dict(batch_index, box_dict): + pred_scores = box_dict['pred_scores'].cpu().numpy() + pred_boxes = box_dict['pred_boxes'].cpu().numpy() + pred_labels = box_dict['pred_labels'].cpu().numpy() + pred_dict = get_template_prediction(pred_scores.shape[0]) + if pred_scores.shape[0] == 0: + return pred_dict + + calib = batch_dict['calib'][batch_index] + image_shape = batch_dict['image_shape'][batch_index].cpu().numpy() +
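+ # boxes are predicted in the lidar frame; convert them to the camera frame and
+ # project to 2D image boxes so results can be written in the KITTI label format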
pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes, calib) + pred_boxes_img = box_utils.boxes3d_kitti_camera_to_imageboxes( + pred_boxes_camera, calib, image_shape=image_shape + ) + + pred_dict['name'] = np.array(class_names)[pred_labels - 1] + pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6] + pred_dict['bbox'] = pred_boxes_img + pred_dict['dimensions'] = pred_boxes_camera[:, 3:6] + pred_dict['location'] = pred_boxes_camera[:, 0:3] + pred_dict['rotation_y'] = pred_boxes_camera[:, 6] + pred_dict['score'] = pred_scores + pred_dict['boxes_lidar'] = pred_boxes + + return pred_dict + + annos = [] + for index, box_dict in enumerate(pred_dicts): + frame_id = batch_dict['frame_id'][index] + + single_pred_dict = generate_single_sample_dict(index, box_dict) + single_pred_dict['frame_id'] = frame_id + annos.append(single_pred_dict) + + if output_path is not None: + cur_det_file = output_path / ('%s.txt' % frame_id) + with open(cur_det_file, 'w') as f: + bbox = single_pred_dict['bbox'] + loc = single_pred_dict['location'] + dims = single_pred_dict['dimensions'] # lhw -> hwl + + for idx in range(len(bbox)): + print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f' + % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx], + bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3], + dims[idx][1], dims[idx][2], dims[idx][0], loc[idx][0], + loc[idx][1], loc[idx][2], single_pred_dict['rotation_y'][idx], + single_pred_dict['score'][idx]), file=f) + + return annos + + def evaluation(self, det_annos, class_names, **kwargs): + if 'annos' not in self.kitti_infos[0].keys(): + return None, {} + + from .kitti_object_eval_python import eval as kitti_eval + + eval_det_annos = copy.deepcopy(det_annos) + eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.kitti_infos] + ap_result_str, ap_dict = kitti_eval.get_official_eval_result(eval_gt_annos, eval_det_annos, class_names) + + return ap_result_str, ap_dict + + def __len__(self): + if self._merge_all_iters_to_one_epoch: + return len(self.kitti_infos) * self.total_epochs + + return len(self.kitti_infos) + + def __getitem__(self, index): + # index = 4 + if self._merge_all_iters_to_one_epoch: + index = index % len(self.kitti_infos) + + info = copy.deepcopy(self.kitti_infos[index]) + + sample_idx = info['point_cloud']['lidar_idx'] + img_shape = info['image']['image_shape'] + calib = self.get_calib(sample_idx) + get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points']) + + input_dict = { + 'frame_id': sample_idx, + 'calib': calib, + } + + if 'annos' in info: + annos = info['annos'] + annos = common_utils.drop_info_with_name(annos, name='DontCare') + loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y'] + gt_names = annos['name'] + gt_boxes_camera = np.concatenate([loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32) + gt_boxes_lidar = box_utils.boxes3d_kitti_camera_to_lidar(gt_boxes_camera, calib) + + input_dict.update({ + 'gt_names': gt_names, + 'gt_boxes': gt_boxes_lidar + }) + if "gt_boxes2d" in get_item_list: + input_dict['gt_boxes2d'] = annos["bbox"] + + road_plane = self.get_road_plane(sample_idx) + if road_plane is not None: + input_dict['road_plane'] = road_plane + + if "points" in get_item_list: + points = self.get_lidar(sample_idx) + if self.dataset_cfg.FOV_POINTS_ONLY: + pts_rect = calib.lidar_to_rect(points[:, 0:3]) + fov_flag = self.get_fov_flag(pts_rect, img_shape, calib) + points = points[fov_flag] + 
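+            # points is an (N, 4) array of x, y, z, intensity in the lidar frame;
+            # with FOV_POINTS_ONLY set, only points that project into image_2 are
+            # kept, matching the region covered by the KITTI annotations.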
input_dict['points'] = points + + if "images" in get_item_list: + input_dict['images'] = self.get_image(sample_idx) + + if "depth_maps" in get_item_list: + input_dict['depth_maps'] = self.get_depth_map(sample_idx) + + if "calib_matricies" in get_item_list: + input_dict["trans_lidar_to_cam"], input_dict["trans_cam_to_img"] = kitti_utils.calib_to_matricies(calib) + + input_dict['calib'] = calib + data_dict = self.prepare_data(data_dict=input_dict) + + data_dict['image_shape'] = img_shape + return data_dict + + +def create_kitti_infos(dataset_cfg, class_names, data_path, save_path, workers=4): + dataset = KittiDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) + train_split, val_split = 'train', 'val' + + train_filename = save_path / ('kitti_infos_%s.pkl' % train_split) + val_filename = save_path / ('kitti_infos_%s.pkl' % val_split) + trainval_filename = save_path / 'kitti_infos_trainval.pkl' + test_filename = save_path / 'kitti_infos_test.pkl' + + print('---------------Start to generate data infos---------------') + + dataset.set_split(train_split) + kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True) + with open(train_filename, 'wb') as f: + pickle.dump(kitti_infos_train, f) + print('Kitti info train file is saved to %s' % train_filename) + + dataset.set_split(val_split) + kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True) + with open(val_filename, 'wb') as f: + pickle.dump(kitti_infos_val, f) + print('Kitti info val file is saved to %s' % val_filename) + + with open(trainval_filename, 'wb') as f: + pickle.dump(kitti_infos_train + kitti_infos_val, f) + print('Kitti info trainval file is saved to %s' % trainval_filename) + + dataset.set_split('test') + kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False) + with open(test_filename, 'wb') as f: + pickle.dump(kitti_infos_test, f) + print('Kitti info test file is saved to %s' % test_filename) + + print('---------------Start create groundtruth database for data augmentation---------------') + dataset.set_split(train_split) + dataset.create_groundtruth_database(train_filename, split=train_split) + + print('---------------Data preparation Done---------------') + + +if __name__ == '__main__': + import sys + if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos': + import yaml + from pathlib import Path + from easydict import EasyDict + dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2]))) + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + create_kitti_infos( + dataset_cfg=dataset_cfg, + class_names=['Car', 'Pedestrian', 'Cyclist'], + data_path=ROOT_DIR / 'data' / 'kitti', + save_path=ROOT_DIR / 'data' / 'kitti' + ) diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/LICENSE b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/LICENSE new file mode 100644 index 000000000..ab602974d --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is 
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/README.md b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/README.md
new file mode 100644
index 000000000..913183edf
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/README.md
@@ -0,0 +1,32 @@
+# kitti-object-eval-python
+**Note**: This is borrowed from [traveller59/kitti-object-eval-python](https://github.com/traveller59/kitti-object-eval-python)
+
+Fast KITTI object detection evaluation in Python (a full evaluation finishes in under 10 seconds), supporting 2D/BEV/3D/AOS metrics as well as COCO-style AP. If you use the command-line interface, numba needs some time to compile its JIT functions on the first run.
+## Dependencies
+Only Python 3.6+ is supported; `numpy`, `skimage`, `numba` and `fire` are required. If you have Anaconda, just install `cudatoolkit` in Anaconda. Otherwise, please refer to this [page](https://github.com/numba/numba#custom-python-environments) to set up LLVM and CUDA for numba.
+* Install via conda:
+```
+conda install -c numba cudatoolkit=x.x  # 8.0, 9.0 or 9.1, depending on your environment
+```
+## Usage
+* Command-line interface:
+```
+python evaluate.py evaluate --label_path=/path/to/your_gt_label_folder --result_path=/path/to/your_result_folder --label_split_file=/path/to/val.txt --current_class=0 --coco=False
+```
+* Python interface:
+```Python
+import kitti_common as kitti
+from eval import get_official_eval_result, get_coco_eval_result
+def _read_imageset_file(path):
+    with open(path, 'r') as f:
+        lines = f.readlines()
+    return [int(line) for line in lines]
+det_path = "/path/to/your_result_folder"
+dt_annos = kitti.get_label_annos(det_path)
+gt_path = "/path/to/your_gt_label_folder"
+gt_split_file = "/path/to/val.txt"  # from https://xiaozhichen.github.io/files/mv3d/imagesets.tar.gz
+val_image_ids = _read_imageset_file(gt_split_file)
+gt_annos = kitti.get_label_annos(gt_path, val_image_ids)
+print(get_official_eval_result(gt_annos, dt_annos, 0))  # ~6s on my machine
+print(get_coco_eval_result(gt_annos, dt_annos, 0))  # ~18s on my machine
+```
diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/__init__.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/eval.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/eval.py
new file mode 100644
index 000000000..1d2a317af
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/eval.py
@@ -0,0 +1,808 @@
+import io as sysio
+
+import numba
+import numpy as np
+
+from .rotate_iou import rotate_iou_gpu_eval
+
+
+@numba.jit
+def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
+    scores.sort()
+    scores = scores[::-1]
+    current_recall = 0
+    thresholds = []
+    for i, score in
enumerate(scores):
+        l_recall = (i + 1) / num_gt
+        if i < (len(scores) - 1):
+            r_recall = (i + 2) / num_gt
+        else:
+            r_recall = l_recall
+        if (((r_recall - current_recall) < (current_recall - l_recall))
+                and (i < (len(scores) - 1))):
+            continue
+        # recall = l_recall
+        thresholds.append(score)
+        current_recall += 1 / (num_sample_pts - 1.0)
+    return thresholds
+
+
+def clean_data(gt_anno, dt_anno, current_class, difficulty):
+    CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']
+    MIN_HEIGHT = [40, 25, 25]
+    MAX_OCCLUSION = [0, 1, 2]
+    MAX_TRUNCATION = [0.15, 0.3, 0.5]
+    dc_bboxes, ignored_gt, ignored_dt = [], [], []
+    current_cls_name = CLASS_NAMES[current_class].lower()
+    num_gt = len(gt_anno["name"])
+    num_dt = len(dt_anno["name"])
+    num_valid_gt = 0
+    for i in range(num_gt):
+        bbox = gt_anno["bbox"][i]
+        gt_name = gt_anno["name"][i].lower()
+        height = bbox[3] - bbox[1]
+        valid_class = -1
+        if (gt_name == current_cls_name):
+            valid_class = 1
+        elif (current_cls_name == "Pedestrian".lower()
+              and "Person_sitting".lower() == gt_name):
+            valid_class = 0
+        elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
+            valid_class = 0
+        else:
+            valid_class = -1
+        ignore = False
+        if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty])
+                or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty])
+                or (height <= MIN_HEIGHT[difficulty])):
+            # if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1:
+            ignore = True
+        if valid_class == 1 and not ignore:
+            ignored_gt.append(0)
+            num_valid_gt += 1
+        elif (valid_class == 0 or (ignore and (valid_class == 1))):
+            ignored_gt.append(1)
+        else:
+            ignored_gt.append(-1)
+        # for i in range(num_gt):
+        if gt_anno["name"][i] == "DontCare":
+            dc_bboxes.append(gt_anno["bbox"][i])
+    for i in range(num_dt):
+        if (dt_anno["name"][i].lower() == current_cls_name):
+            valid_class = 1
+        else:
+            valid_class = -1
+        height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
+        if height < MIN_HEIGHT[difficulty]:
+            ignored_dt.append(1)
+        elif valid_class == 1:
+            ignored_dt.append(0)
+        else:
+            ignored_dt.append(-1)
+
+    return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
+
+
+@numba.jit(nopython=True)
+def image_box_overlap(boxes, query_boxes, criterion=-1):
+    N = boxes.shape[0]
+    K = query_boxes.shape[0]
+    overlaps = np.zeros((N, K), dtype=boxes.dtype)
+    for k in range(K):
+        qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
+                     (query_boxes[k, 3] - query_boxes[k, 1]))
+        for n in range(N):
+            iw = (min(boxes[n, 2], query_boxes[k, 2]) -
+                  max(boxes[n, 0], query_boxes[k, 0]))
+            if iw > 0:
+                ih = (min(boxes[n, 3], query_boxes[k, 3]) -
+                      max(boxes[n, 1], query_boxes[k, 1]))
+                if ih > 0:
+                    if criterion == -1:
+                        ua = (
+                            (boxes[n, 2] - boxes[n, 0]) *
+                            (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
+                    elif criterion == 0:
+                        ua = ((boxes[n, 2] - boxes[n, 0]) *
+                              (boxes[n, 3] - boxes[n, 1]))
+                    elif criterion == 1:
+                        ua = qbox_area
+                    else:
+                        ua = 1.0
+                    overlaps[n, k] = iw * ih / ua
+    return overlaps
+
+
+def bev_box_overlap(boxes, qboxes, criterion=-1):
+    riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
+    return riou
+
+
+@numba.jit(nopython=True, parallel=True)
+def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):
+    # ONLY supports overlap in CAMERA coordinates, not lidar.
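+    # Boxes here are (x, y, z, l, h, w, ry) in camera coordinates, where y points
+    # downwards and (x, y, z) is the bottom center of the box, so the vertical
+    # extent is [y - h, y]. On entry, rinc holds the rotated BEV intersection
+    # area of each pair; multiplying it by the vertical overlap iw gives the 3D
+    # intersection volume, which is written back into rinc as the IoU (or the
+    # ratio selected by criterion).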
+ N, K = boxes.shape[0], qboxes.shape[0] + for i in range(N): + for j in range(K): + if rinc[i, j] > 0: + # iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] + + # qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1])) + iw = (min(boxes[i, 1], qboxes[j, 1]) - max( + boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4])) + + if iw > 0: + area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5] + area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5] + inc = iw * rinc[i, j] + if criterion == -1: + ua = (area1 + area2 - inc) + elif criterion == 0: + ua = area1 + elif criterion == 1: + ua = area2 + else: + ua = inc + rinc[i, j] = inc / ua + else: + rinc[i, j] = 0.0 + + +def d3_box_overlap(boxes, qboxes, criterion=-1): + rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]], + qboxes[:, [0, 2, 3, 5, 6]], 2) + d3_box_overlap_kernel(boxes, qboxes, rinc, criterion) + return rinc + + +@numba.jit(nopython=True) +def compute_statistics_jit(overlaps, + gt_datas, + dt_datas, + ignored_gt, + ignored_det, + dc_bboxes, + metric, + min_overlap, + thresh=0, + compute_fp=False, + compute_aos=False): + + det_size = dt_datas.shape[0] + gt_size = gt_datas.shape[0] + dt_scores = dt_datas[:, -1] + dt_alphas = dt_datas[:, 4] + gt_alphas = gt_datas[:, 4] + dt_bboxes = dt_datas[:, :4] + gt_bboxes = gt_datas[:, :4] + + assigned_detection = [False] * det_size + ignored_threshold = [False] * det_size + if compute_fp: + for i in range(det_size): + if (dt_scores[i] < thresh): + ignored_threshold[i] = True + NO_DETECTION = -10000000 + tp, fp, fn, similarity = 0, 0, 0, 0 + # thresholds = [0.0] + # delta = [0.0] + thresholds = np.zeros((gt_size, )) + thresh_idx = 0 + delta = np.zeros((gt_size, )) + delta_idx = 0 + for i in range(gt_size): + if ignored_gt[i] == -1: + continue + det_idx = -1 + valid_detection = NO_DETECTION + max_overlap = 0 + assigned_ignored_det = False + + for j in range(det_size): + if (ignored_det[j] == -1): + continue + if (assigned_detection[j]): + continue + if (ignored_threshold[j]): + continue + overlap = overlaps[j, i] + dt_score = dt_scores[j] + if (not compute_fp and (overlap > min_overlap) + and dt_score > valid_detection): + det_idx = j + valid_detection = dt_score + elif (compute_fp and (overlap > min_overlap) + and (overlap > max_overlap or assigned_ignored_det) + and ignored_det[j] == 0): + max_overlap = overlap + det_idx = j + valid_detection = 1 + assigned_ignored_det = False + elif (compute_fp and (overlap > min_overlap) + and (valid_detection == NO_DETECTION) + and ignored_det[j] == 1): + det_idx = j + valid_detection = 1 + assigned_ignored_det = True + + if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0: + fn += 1 + elif ((valid_detection != NO_DETECTION) + and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)): + assigned_detection[det_idx] = True + elif valid_detection != NO_DETECTION: + tp += 1 + # thresholds.append(dt_scores[det_idx]) + thresholds[thresh_idx] = dt_scores[det_idx] + thresh_idx += 1 + if compute_aos: + # delta.append(gt_alphas[i] - dt_alphas[det_idx]) + delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx] + delta_idx += 1 + + assigned_detection[det_idx] = True + if compute_fp: + for i in range(det_size): + if (not (assigned_detection[i] or ignored_det[i] == -1 + or ignored_det[i] == 1 or ignored_threshold[i])): + fp += 1 + nstuff = 0 + if metric == 0: + overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0) + for i in range(dc_bboxes.shape[0]): + for j in range(det_size): + if (assigned_detection[j]): + continue + if (ignored_det[j] == -1 or ignored_det[j] == 1): + 
continue + if (ignored_threshold[j]): + continue + if overlaps_dt_dc[j, i] > min_overlap: + assigned_detection[j] = True + nstuff += 1 + fp -= nstuff + if compute_aos: + tmp = np.zeros((fp + delta_idx, )) + # tmp = [0] * fp + for i in range(delta_idx): + tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0 + # tmp.append((1.0 + np.cos(delta[i])) / 2.0) + # assert len(tmp) == fp + tp + # assert len(delta) == tp + if tp > 0 or fp > 0: + similarity = np.sum(tmp) + else: + similarity = -1 + return tp, fp, fn, similarity, thresholds[:thresh_idx] + + +def get_split_parts(num, num_part): + same_part = num // num_part + remain_num = num % num_part + if same_part == 0: + return [num] + + if remain_num == 0: + return [same_part] * num_part + else: + return [same_part] * num_part + [remain_num] + + +@numba.jit(nopython=True) +def fused_compute_statistics(overlaps, + pr, + gt_nums, + dt_nums, + dc_nums, + gt_datas, + dt_datas, + dontcares, + ignored_gts, + ignored_dets, + metric, + min_overlap, + thresholds, + compute_aos=False): + gt_num = 0 + dt_num = 0 + dc_num = 0 + for i in range(gt_nums.shape[0]): + for t, thresh in enumerate(thresholds): + overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num: + gt_num + gt_nums[i]] + + gt_data = gt_datas[gt_num:gt_num + gt_nums[i]] + dt_data = dt_datas[dt_num:dt_num + dt_nums[i]] + ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]] + ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]] + dontcare = dontcares[dc_num:dc_num + dc_nums[i]] + tp, fp, fn, similarity, _ = compute_statistics_jit( + overlap, + gt_data, + dt_data, + ignored_gt, + ignored_det, + dontcare, + metric, + min_overlap=min_overlap, + thresh=thresh, + compute_fp=True, + compute_aos=compute_aos) + pr[t, 0] += tp + pr[t, 1] += fp + pr[t, 2] += fn + if similarity != -1: + pr[t, 3] += similarity + gt_num += gt_nums[i] + dt_num += dt_nums[i] + dc_num += dc_nums[i] + + +def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50): + """fast iou algorithm. this function can be used independently to + do result analysis. Must be used in CAMERA coordinate system. + Args: + gt_annos: dict, must from get_label_annos() in kitti_common.py + dt_annos: dict, must from get_label_annos() in kitti_common.py + metric: eval type. 0: bbox, 1: bev, 2: 3d + num_parts: int. 
a parameter for fast calculate algorithm + """ + assert len(gt_annos) == len(dt_annos) + total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0) + total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0) + num_examples = len(gt_annos) + split_parts = get_split_parts(num_examples, num_parts) + parted_overlaps = [] + example_idx = 0 + + for num_part in split_parts: + gt_annos_part = gt_annos[example_idx:example_idx + num_part] + dt_annos_part = dt_annos[example_idx:example_idx + num_part] + if metric == 0: + gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0) + dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0) + overlap_part = image_box_overlap(gt_boxes, dt_boxes) + elif metric == 1: + loc = np.concatenate( + [a["location"][:, [0, 2]] for a in gt_annos_part], 0) + dims = np.concatenate( + [a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0) + rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0) + gt_boxes = np.concatenate( + [loc, dims, rots[..., np.newaxis]], axis=1) + loc = np.concatenate( + [a["location"][:, [0, 2]] for a in dt_annos_part], 0) + dims = np.concatenate( + [a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0) + rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0) + dt_boxes = np.concatenate( + [loc, dims, rots[..., np.newaxis]], axis=1) + overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype( + np.float64) + elif metric == 2: + loc = np.concatenate([a["location"] for a in gt_annos_part], 0) + dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0) + rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0) + gt_boxes = np.concatenate( + [loc, dims, rots[..., np.newaxis]], axis=1) + loc = np.concatenate([a["location"] for a in dt_annos_part], 0) + dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0) + rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0) + dt_boxes = np.concatenate( + [loc, dims, rots[..., np.newaxis]], axis=1) + overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype( + np.float64) + else: + raise ValueError("unknown metric") + parted_overlaps.append(overlap_part) + example_idx += num_part + overlaps = [] + example_idx = 0 + for j, num_part in enumerate(split_parts): + gt_annos_part = gt_annos[example_idx:example_idx + num_part] + dt_annos_part = dt_annos[example_idx:example_idx + num_part] + gt_num_idx, dt_num_idx = 0, 0 + for i in range(num_part): + gt_box_num = total_gt_num[example_idx + i] + dt_box_num = total_dt_num[example_idx + i] + overlaps.append( + parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num, + dt_num_idx:dt_num_idx + dt_box_num]) + gt_num_idx += gt_box_num + dt_num_idx += dt_box_num + example_idx += num_part + + return overlaps, parted_overlaps, total_gt_num, total_dt_num + + +def _prepare_data(gt_annos, dt_annos, current_class, difficulty): + gt_datas_list = [] + dt_datas_list = [] + total_dc_num = [] + ignored_gts, ignored_dets, dontcares = [], [], [] + total_num_valid_gt = 0 + for i in range(len(gt_annos)): + rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty) + num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets + ignored_gts.append(np.array(ignored_gt, dtype=np.int64)) + ignored_dets.append(np.array(ignored_det, dtype=np.int64)) + if len(dc_bboxes) == 0: + dc_bboxes = np.zeros((0, 4)).astype(np.float64) + else: + dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64) + total_dc_num.append(dc_bboxes.shape[0]) + dontcares.append(dc_bboxes) + total_num_valid_gt += num_valid_gt + gt_datas = 
np.concatenate( + [gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1) + dt_datas = np.concatenate([ + dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis], + dt_annos[i]["score"][..., np.newaxis] + ], 1) + gt_datas_list.append(gt_datas) + dt_datas_list.append(dt_datas) + total_dc_num = np.stack(total_dc_num, axis=0) + return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares, + total_dc_num, total_num_valid_gt) + + +def eval_class(gt_annos, + dt_annos, + current_classes, + difficultys, + metric, + min_overlaps, + compute_aos=False, + num_parts=100): + """Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP. + Args: + gt_annos: dict, must from get_label_annos() in kitti_common.py + dt_annos: dict, must from get_label_annos() in kitti_common.py + current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist + difficultys: list of int. eval difficulty, 0: easy, 1: normal, 2: hard + metric: eval type. 0: bbox, 1: bev, 2: 3d + min_overlaps: float, min overlap. format: [num_overlap, metric, class]. + num_parts: int. a parameter for fast calculate algorithm + + Returns: + dict of recall, precision and aos + """ + assert len(gt_annos) == len(dt_annos) + num_examples = len(gt_annos) + split_parts = get_split_parts(num_examples, num_parts) + + rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts) + overlaps, parted_overlaps, total_dt_num, total_gt_num = rets + N_SAMPLE_PTS = 41 + num_minoverlap = len(min_overlaps) + num_class = len(current_classes) + num_difficulty = len(difficultys) + precision = np.zeros( + [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) + recall = np.zeros( + [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) + aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]) + for m, current_class in enumerate(current_classes): + for l, difficulty in enumerate(difficultys): + rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty) + (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, + dontcares, total_dc_num, total_num_valid_gt) = rets + for k, min_overlap in enumerate(min_overlaps[:, metric, m]): + thresholdss = [] + for i in range(len(gt_annos)): + rets = compute_statistics_jit( + overlaps[i], + gt_datas_list[i], + dt_datas_list[i], + ignored_gts[i], + ignored_dets[i], + dontcares[i], + metric, + min_overlap=min_overlap, + thresh=0.0, + compute_fp=False) + tp, fp, fn, similarity, thresholds = rets + thresholdss += thresholds.tolist() + thresholdss = np.array(thresholdss) + thresholds = get_thresholds(thresholdss, total_num_valid_gt) + thresholds = np.array(thresholds) + pr = np.zeros([len(thresholds), 4]) + idx = 0 + for j, num_part in enumerate(split_parts): + gt_datas_part = np.concatenate( + gt_datas_list[idx:idx + num_part], 0) + dt_datas_part = np.concatenate( + dt_datas_list[idx:idx + num_part], 0) + dc_datas_part = np.concatenate( + dontcares[idx:idx + num_part], 0) + ignored_dets_part = np.concatenate( + ignored_dets[idx:idx + num_part], 0) + ignored_gts_part = np.concatenate( + ignored_gts[idx:idx + num_part], 0) + fused_compute_statistics( + parted_overlaps[j], + pr, + total_gt_num[idx:idx + num_part], + total_dt_num[idx:idx + num_part], + total_dc_num[idx:idx + num_part], + gt_datas_part, + dt_datas_part, + dc_datas_part, + ignored_gts_part, + ignored_dets_part, + metric, + min_overlap=min_overlap, + thresholds=thresholds, + compute_aos=compute_aos) + idx += num_part + for i in range(len(thresholds)): + recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 
2])
+                    precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
+                    if compute_aos:
+                        aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
+                for i in range(len(thresholds)):
+                    precision[m, l, k, i] = np.max(
+                        precision[m, l, k, i:], axis=-1)
+                    recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
+                    if compute_aos:
+                        aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
+    ret_dict = {
+        "recall": recall,
+        "precision": precision,
+        "orientation": aos,
+    }
+    return ret_dict
+
+
+def get_mAP(prec):
+    sums = 0
+    for i in range(0, prec.shape[-1], 4):
+        sums = sums + prec[..., i]
+    return sums / 11 * 100
+
+
+def get_mAP_R40(prec):
+    sums = 0
+    for i in range(1, prec.shape[-1]):
+        sums = sums + prec[..., i]
+    return sums / 40 * 100
+
+
+def print_str(value, *arg, sstream=None):
+    if sstream is None:
+        sstream = sysio.StringIO()
+    sstream.truncate(0)
+    sstream.seek(0)
+    print(value, *arg, file=sstream)
+    return sstream.getvalue()
+
+
+def do_eval(gt_annos,
+            dt_annos,
+            current_classes,
+            min_overlaps,
+            compute_aos=False,
+            PR_detail_dict=None):
+    # min_overlaps: [num_minoverlap, metric, num_class]
+    difficultys = [0, 1, 2]
+    ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,
+                     min_overlaps, compute_aos)
+    # ret: [num_class, num_diff, num_minoverlap, num_sample_points]
+    mAP_bbox = get_mAP(ret["precision"])
+    mAP_bbox_R40 = get_mAP_R40(ret["precision"])
+
+    if PR_detail_dict is not None:
+        PR_detail_dict['bbox'] = ret['precision']
+
+    mAP_aos = mAP_aos_R40 = None
+    if compute_aos:
+        mAP_aos = get_mAP(ret["orientation"])
+        mAP_aos_R40 = get_mAP_R40(ret["orientation"])
+
+        if PR_detail_dict is not None:
+            PR_detail_dict['aos'] = ret['orientation']
+
+    ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,
+                     min_overlaps)
+    mAP_bev = get_mAP(ret["precision"])
+    mAP_bev_R40 = get_mAP_R40(ret["precision"])
+
+    if PR_detail_dict is not None:
+        PR_detail_dict['bev'] = ret['precision']
+
+    ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,
+                     min_overlaps)
+    mAP_3d = get_mAP(ret["precision"])
+    mAP_3d_R40 = get_mAP_R40(ret["precision"])
+    if PR_detail_dict is not None:
+        PR_detail_dict['3d'] = ret['precision']
+    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40
+
+
+def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,
+                       compute_aos):
+    # overlap_ranges: [range, metric, num_class]
+    min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
+    for i in range(overlap_ranges.shape[1]):
+        for j in range(overlap_ranges.shape[2]):
+            min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
+    # do_eval also returns the R40 variants; only the first four (11-point)
+    # results are averaged over the overlap range here.
+    mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval(
+        gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)[:4]
+    # ret: [num_class, num_diff, num_minoverlap]
+    mAP_bbox = mAP_bbox.mean(-1)
+    mAP_bev = mAP_bev.mean(-1)
+    mAP_3d = mAP_3d.mean(-1)
+    if mAP_aos is not None:
+        mAP_aos = mAP_aos.mean(-1)
+    return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
+
+
+def get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):
+    overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,
+                             0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],
+                            [0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])
+    overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,
+                             0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],
+                            [0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])
+    min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0)  # [2, 3, 6]
+    class_to_name = {
+        0: 'Car',
+        1: 'Pedestrian',
+        2: 'Cyclist',
+        3: 'Van',
+        4: 'Person_sitting',
+        5: 'Truck'
+    }
+    name_to_class = {v: n for
n, v in class_to_name.items()} + if not isinstance(current_classes, (list, tuple)): + current_classes = [current_classes] + current_classes_int = [] + for curcls in current_classes: + if isinstance(curcls, str): + current_classes_int.append(name_to_class[curcls]) + else: + current_classes_int.append(curcls) + current_classes = current_classes_int + min_overlaps = min_overlaps[:, :, current_classes] + result = '' + # check whether alpha is valid + compute_aos = False + for anno in dt_annos: + if anno['alpha'].shape[0] != 0: + if anno['alpha'][0] != -10: + compute_aos = True + break + mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval( + gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict) + + ret_dict = {} + for j, curcls in enumerate(current_classes): + # mAP threshold array: [num_minoverlap, metric, class] + # mAP result: [num_class, num_diff, num_minoverlap] + for i in range(min_overlaps.shape[0]): + result += print_str( + (f"{class_to_name[curcls]} " + "AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j]))) + result += print_str((f"bbox AP:{mAPbbox[j, 0, i]:.4f}, " + f"{mAPbbox[j, 1, i]:.4f}, " + f"{mAPbbox[j, 2, i]:.4f}")) + result += print_str((f"bev AP:{mAPbev[j, 0, i]:.4f}, " + f"{mAPbev[j, 1, i]:.4f}, " + f"{mAPbev[j, 2, i]:.4f}")) + result += print_str((f"3d AP:{mAP3d[j, 0, i]:.4f}, " + f"{mAP3d[j, 1, i]:.4f}, " + f"{mAP3d[j, 2, i]:.4f}")) + + if compute_aos: + result += print_str((f"aos AP:{mAPaos[j, 0, i]:.2f}, " + f"{mAPaos[j, 1, i]:.2f}, " + f"{mAPaos[j, 2, i]:.2f}")) + # if i == 0: + # ret_dict['%s_aos/easy' % class_to_name[curcls]] = mAPaos[j, 0, 0] + # ret_dict['%s_aos/moderate' % class_to_name[curcls]] = mAPaos[j, 1, 0] + # ret_dict['%s_aos/hard' % class_to_name[curcls]] = mAPaos[j, 2, 0] + + result += print_str( + (f"{class_to_name[curcls]} " + "AP_R40@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j]))) + result += print_str((f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, " + f"{mAPbbox_R40[j, 1, i]:.4f}, " + f"{mAPbbox_R40[j, 2, i]:.4f}")) + result += print_str((f"bev AP:{mAPbev_R40[j, 0, i]:.4f}, " + f"{mAPbev_R40[j, 1, i]:.4f}, " + f"{mAPbev_R40[j, 2, i]:.4f}")) + result += print_str((f"3d AP:{mAP3d_R40[j, 0, i]:.4f}, " + f"{mAP3d_R40[j, 1, i]:.4f}, " + f"{mAP3d_R40[j, 2, i]:.4f}")) + if compute_aos: + result += print_str((f"aos AP:{mAPaos_R40[j, 0, i]:.2f}, " + f"{mAPaos_R40[j, 1, i]:.2f}, " + f"{mAPaos_R40[j, 2, i]:.2f}")) + if i == 0: + ret_dict['%s_aos/easy_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0] + ret_dict['%s_aos/moderate_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0] + ret_dict['%s_aos/hard_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0] + + if i == 0: + # ret_dict['%s_3d/easy' % class_to_name[curcls]] = mAP3d[j, 0, 0] + # ret_dict['%s_3d/moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0] + # ret_dict['%s_3d/hard' % class_to_name[curcls]] = mAP3d[j, 2, 0] + # ret_dict['%s_bev/easy' % class_to_name[curcls]] = mAPbev[j, 0, 0] + # ret_dict['%s_bev/moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0] + # ret_dict['%s_bev/hard' % class_to_name[curcls]] = mAPbev[j, 2, 0] + # ret_dict['%s_image/easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0] + # ret_dict['%s_image/moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0] + # ret_dict['%s_image/hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0] + + ret_dict['%s_3d/easy_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0] + ret_dict['%s_3d/moderate_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0] + 
ret_dict['%s_3d/hard_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]
+                ret_dict['%s_bev/easy_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]
+                ret_dict['%s_bev/moderate_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]
+                ret_dict['%s_bev/hard_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]
+                ret_dict['%s_image/easy_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]
+                ret_dict['%s_image/moderate_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]
+                ret_dict['%s_image/hard_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]
+
+    return result, ret_dict
+
+
+def get_coco_eval_result(gt_annos, dt_annos, current_classes):
+    class_to_name = {
+        0: 'Car',
+        1: 'Pedestrian',
+        2: 'Cyclist',
+        3: 'Van',
+        4: 'Person_sitting',
+    }
+    class_to_range = {
+        0: [0.5, 0.95, 10],
+        1: [0.25, 0.7, 10],
+        2: [0.25, 0.7, 10],
+        3: [0.5, 0.95, 10],
+        4: [0.25, 0.7, 10],
+    }
+    name_to_class = {v: n for n, v in class_to_name.items()}
+    if not isinstance(current_classes, (list, tuple)):
+        current_classes = [current_classes]
+    current_classes_int = []
+    for curcls in current_classes:
+        if isinstance(curcls, str):
+            current_classes_int.append(name_to_class[curcls])
+        else:
+            current_classes_int.append(curcls)
+    current_classes = current_classes_int
+    overlap_ranges = np.zeros([3, 3, len(current_classes)])
+    for i, curcls in enumerate(current_classes):
+        overlap_ranges[:, :, i] = np.array(
+            class_to_range[curcls])[:, np.newaxis]
+    result = ''
+    # check whether alpha is valid
+    compute_aos = False
+    for anno in dt_annos:
+        if anno['alpha'].shape[0] != 0:
+            if anno['alpha'][0] != -10:
+                compute_aos = True
+            break
+    mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
+        gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)
+    for j, curcls in enumerate(current_classes):
+        # mAP threshold array: [num_minoverlap, metric, class]
+        # mAP result: [num_class, num_diff, num_minoverlap]
+        o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
+        o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
+        result += print_str((f"{class_to_name[curcls]} "
+                             "coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
+        result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
+                             f"{mAPbbox[j, 1]:.2f}, "
+                             f"{mAPbbox[j, 2]:.2f}"))
+        result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
+                             f"{mAPbev[j, 1]:.2f}, "
+                             f"{mAPbev[j, 2]:.2f}"))
+        result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
+                             f"{mAP3d[j, 1]:.2f}, "
+                             f"{mAP3d[j, 2]:.2f}"))
+        if compute_aos:
+            result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
+                                 f"{mAPaos[j, 1]:.2f}, "
+                                 f"{mAPaos[j, 2]:.2f}"))
+    return result
diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py
new file mode 100644
index 000000000..7e1dab6f0
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/evaluate.py
@@ -0,0 +1,33 @@
+import time
+
+import fire
+
+from . import kitti_common as kitti
+from .eval import get_coco_eval_result, get_official_eval_result
+
+
+def _read_imageset_file(path):
+    with open(path, 'r') as f:
+        lines = f.readlines()
+    return [int(line) for line in lines]
+
+
+def evaluate(label_path,
+             result_path,
+             label_split_file,
+             current_class=0,
+             coco=False,
+             score_thresh=-1):
+    dt_annos = kitti.get_label_annos(result_path)
+    if score_thresh > 0:
+        dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
+    val_image_ids = _read_imageset_file(label_split_file)
+    gt_annos = kitti.get_label_annos(label_path, val_image_ids)
+    if coco:
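+        # coco=True selects the COCO-style protocol (AP averaged over a range
+        # of overlap thresholds, see do_coco_style_eval in eval.py) instead of
+        # the single official KITTI overlap thresholds.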
+ return get_coco_eval_result(gt_annos, dt_annos, current_class) + else: + return get_official_eval_result(gt_annos, dt_annos, current_class) + + +if __name__ == '__main__': + fire.Fire() diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py new file mode 100644 index 000000000..27c91444d --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/kitti_common.py @@ -0,0 +1,412 @@ +import concurrent.futures as futures +import os +import pathlib +import re +from collections import OrderedDict + +import numpy as np +from skimage import io + + +def get_image_index_str(img_idx): + return "{:06d}".format(img_idx) + + +def get_kitti_info_path(idx, + prefix, + info_type='image_2', + file_tail='.png', + training=True, + relative_path=True): + img_idx_str = get_image_index_str(idx) + img_idx_str += file_tail + prefix = pathlib.Path(prefix) + if training: + file_path = pathlib.Path('training') / info_type / img_idx_str + else: + file_path = pathlib.Path('testing') / info_type / img_idx_str + if not (prefix / file_path).exists(): + raise ValueError("file not exist: {}".format(file_path)) + if relative_path: + return str(file_path) + else: + return str(prefix / file_path) + + +def get_image_path(idx, prefix, training=True, relative_path=True): + return get_kitti_info_path(idx, prefix, 'image_2', '.png', training, + relative_path) + + +def get_label_path(idx, prefix, training=True, relative_path=True): + return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training, + relative_path) + + +def get_velodyne_path(idx, prefix, training=True, relative_path=True): + return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training, + relative_path) + + +def get_calib_path(idx, prefix, training=True, relative_path=True): + return get_kitti_info_path(idx, prefix, 'calib', '.txt', training, + relative_path) + + +def _extend_matrix(mat): + mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0) + return mat + + +def get_kitti_image_info(path, + training=True, + label_info=True, + velodyne=False, + calib=False, + image_ids=7481, + extend_matrix=True, + num_worker=8, + relative_path=True, + with_imageshape=True): + # image_infos = [] + root_path = pathlib.Path(path) + if not isinstance(image_ids, list): + image_ids = list(range(image_ids)) + + def map_func(idx): + image_info = {'image_idx': idx} + annotations = None + if velodyne: + image_info['velodyne_path'] = get_velodyne_path( + idx, path, training, relative_path) + image_info['img_path'] = get_image_path(idx, path, training, + relative_path) + if with_imageshape: + img_path = image_info['img_path'] + if relative_path: + img_path = str(root_path / img_path) + image_info['img_shape'] = np.array( + io.imread(img_path).shape[:2], dtype=np.int32) + if label_info: + label_path = get_label_path(idx, path, training, relative_path) + if relative_path: + label_path = str(root_path / label_path) + annotations = get_label_anno(label_path) + if calib: + calib_path = get_calib_path( + idx, path, training, relative_path=False) + with open(calib_path, 'r') as f: + lines = f.readlines() + P0 = np.array( + [float(info) for info in lines[0].split(' ')[1:13]]).reshape( + [3, 4]) + P1 = np.array( + [float(info) for info in lines[1].split(' ')[1:13]]).reshape( + [3, 4]) + P2 = np.array( + [float(info) for info in lines[2].split(' ')[1:13]]).reshape( + [3, 4]) + P3 = np.array( + [float(info) for info in lines[3].split(' 
')[1:13]]).reshape( + [3, 4]) + if extend_matrix: + P0 = _extend_matrix(P0) + P1 = _extend_matrix(P1) + P2 = _extend_matrix(P2) + P3 = _extend_matrix(P3) + image_info['calib/P0'] = P0 + image_info['calib/P1'] = P1 + image_info['calib/P2'] = P2 + image_info['calib/P3'] = P3 + R0_rect = np.array([ + float(info) for info in lines[4].split(' ')[1:10] + ]).reshape([3, 3]) + if extend_matrix: + rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype) + rect_4x4[3, 3] = 1. + rect_4x4[:3, :3] = R0_rect + else: + rect_4x4 = R0_rect + image_info['calib/R0_rect'] = rect_4x4 + Tr_velo_to_cam = np.array([ + float(info) for info in lines[5].split(' ')[1:13] + ]).reshape([3, 4]) + Tr_imu_to_velo = np.array([ + float(info) for info in lines[6].split(' ')[1:13] + ]).reshape([3, 4]) + if extend_matrix: + Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam) + Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo) + image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam + image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo + if annotations is not None: + image_info['annos'] = annotations + add_difficulty_to_annos(image_info) + return image_info + + with futures.ThreadPoolExecutor(num_worker) as executor: + image_infos = executor.map(map_func, image_ids) + return list(image_infos) + + +def filter_kitti_anno(image_anno, + used_classes, + used_difficulty=None, + dontcare_iou=None): + if not isinstance(used_classes, (list, tuple)): + used_classes = [used_classes] + img_filtered_annotations = {} + relevant_annotation_indices = [ + i for i, x in enumerate(image_anno['name']) if x in used_classes + ] + for key in image_anno.keys(): + img_filtered_annotations[key] = ( + image_anno[key][relevant_annotation_indices]) + if used_difficulty is not None: + relevant_annotation_indices = [ + i for i, x in enumerate(img_filtered_annotations['difficulty']) + if x in used_difficulty + ] + for key in image_anno.keys(): + img_filtered_annotations[key] = ( + img_filtered_annotations[key][relevant_annotation_indices]) + + if 'DontCare' in used_classes and dontcare_iou is not None: + dont_care_indices = [ + i for i, x in enumerate(img_filtered_annotations['name']) + if x == 'DontCare' + ] + # bounding box format [y_min, x_min, y_max, x_max] + all_boxes = img_filtered_annotations['bbox'] + ious = iou(all_boxes, all_boxes[dont_care_indices]) + + # Remove all bounding boxes that overlap with a dontcare region. 
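+        # KITTI labels ambiguous regions as DontCare; any annotation whose 2D
+        # box overlaps a DontCare region by more than dontcare_iou is dropped
+        # from the filtered annotations.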
+ if ious.size > 0: + boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou + for key in image_anno.keys(): + img_filtered_annotations[key] = (img_filtered_annotations[key][ + np.logical_not(boxes_to_remove)]) + return img_filtered_annotations + +def filter_annos_low_score(image_annos, thresh): + new_image_annos = [] + for anno in image_annos: + img_filtered_annotations = {} + relevant_annotation_indices = [ + i for i, s in enumerate(anno['score']) if s >= thresh + ] + for key in anno.keys(): + img_filtered_annotations[key] = ( + anno[key][relevant_annotation_indices]) + new_image_annos.append(img_filtered_annotations) + return new_image_annos + +def kitti_result_line(result_dict, precision=4): + prec_float = "{" + ":.{}f".format(precision) + "}" + res_line = [] + all_field_default = OrderedDict([ + ('name', None), + ('truncated', -1), + ('occluded', -1), + ('alpha', -10), + ('bbox', None), + ('dimensions', [-1, -1, -1]), + ('location', [-1000, -1000, -1000]), + ('rotation_y', -10), + ('score', None), + ]) + res_dict = [(key, None) for key, val in all_field_default.items()] + res_dict = OrderedDict(res_dict) + for key, val in result_dict.items(): + if all_field_default[key] is None and val is None: + raise ValueError("you must specify a value for {}".format(key)) + res_dict[key] = val + + for key, val in res_dict.items(): + if key == 'name': + res_line.append(val) + elif key in ['truncated', 'alpha', 'rotation_y', 'score']: + if val is None: + res_line.append(str(all_field_default[key])) + else: + res_line.append(prec_float.format(val)) + elif key == 'occluded': + if val is None: + res_line.append(str(all_field_default[key])) + else: + res_line.append('{}'.format(val)) + elif key in ['bbox', 'dimensions', 'location']: + if val is None: + res_line += [str(v) for v in all_field_default[key]] + else: + res_line += [prec_float.format(v) for v in val] + else: + raise ValueError("unknown key. 
supported key:{}".format(
+                res_dict.keys()))
+    return ' '.join(res_line)
+
+
+def add_difficulty_to_annos(info):
+    min_height = [40, 25,
+                  25]  # minimum height for evaluated groundtruth/detections
+    max_occlusion = [
+        0, 1, 2
+    ]  # maximum occlusion level of the groundtruth used for evaluation
+    max_trunc = [
+        0.15, 0.3, 0.5
+    ]  # maximum truncation level of the groundtruth used for evaluation
+    annos = info['annos']
+    dims = annos['dimensions']  # lhw format
+    bbox = annos['bbox']
+    height = bbox[:, 3] - bbox[:, 1]
+    occlusion = annos['occluded']
+    truncation = annos['truncated']
+    diff = []
+    easy_mask = np.ones((len(dims), ), dtype=bool)
+    moderate_mask = np.ones((len(dims), ), dtype=bool)
+    hard_mask = np.ones((len(dims), ), dtype=bool)
+    i = 0
+    for h, o, t in zip(height, occlusion, truncation):
+        if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
+            easy_mask[i] = False
+        if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
+            moderate_mask[i] = False
+        if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
+            hard_mask[i] = False
+        i += 1
+    is_easy = easy_mask
+    is_moderate = np.logical_xor(easy_mask, moderate_mask)
+    is_hard = np.logical_xor(hard_mask, moderate_mask)
+
+    for i in range(len(dims)):
+        if is_easy[i]:
+            diff.append(0)
+        elif is_moderate[i]:
+            diff.append(1)
+        elif is_hard[i]:
+            diff.append(2)
+        else:
+            diff.append(-1)
+    annos["difficulty"] = np.array(diff, np.int32)
+    return diff
+
+
+def get_label_anno(label_path):
+    annotations = {}
+    annotations.update({
+        'name': [],
+        'truncated': [],
+        'occluded': [],
+        'alpha': [],
+        'bbox': [],
+        'dimensions': [],
+        'location': [],
+        'rotation_y': []
+    })
+    with open(label_path, 'r') as f:
+        lines = f.readlines()
+    # if len(lines) == 0 or len(lines[0]) < 15:
+    #     content = []
+    # else:
+    content = [line.strip().split(' ') for line in lines]
+    annotations['name'] = np.array([x[0] for x in content])
+    annotations['truncated'] = np.array([float(x[1]) for x in content])
+    annotations['occluded'] = np.array([int(x[2]) for x in content])
+    annotations['alpha'] = np.array([float(x[3]) for x in content])
+    annotations['bbox'] = np.array(
+        [[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
+    # dimensions will convert hwl format to standard lhw(camera) format.
+    annotations['dimensions'] = np.array(
+        [[float(info) for info in x[8:11]] for x in content]).reshape(
+            -1, 3)[:, [2, 0, 1]]
+    annotations['location'] = np.array(
+        [[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
+    annotations['rotation_y'] = np.array(
+        [float(x[14]) for x in content]).reshape(-1)
+    if len(content) != 0 and len(content[0]) == 16:  # have score
+        annotations['score'] = np.array([float(x[15]) for x in content])
+    else:
+        annotations['score'] = np.zeros([len(annotations['bbox'])])
+    return annotations
+
+def get_label_annos(label_folder, image_ids=None):
+    if image_ids is None:
+        filepaths = pathlib.Path(label_folder).glob('*.txt')
+        prog = re.compile(r'^\d{6}.txt$')
+        filepaths = filter(lambda f: prog.match(f.name), filepaths)
+        image_ids = [int(p.stem) for p in filepaths]
+        image_ids = sorted(image_ids)
+    if not isinstance(image_ids, list):
+        image_ids = list(range(image_ids))
+    annos = []
+    label_folder = pathlib.Path(label_folder)
+    for idx in image_ids:
+        image_idx = get_image_index_str(idx)
+        label_filename = label_folder / (image_idx + '.txt')
+        annos.append(get_label_anno(label_filename))
+    return annos
+
+def area(boxes, add1=False):
+    """Computes area of boxes.
+ + Args: + boxes: Numpy array with shape [N, 4] holding N boxes + + Returns: + a numpy array with shape [N*1] representing box areas + """ + if add1: + return (boxes[:, 2] - boxes[:, 0] + 1.0) * ( + boxes[:, 3] - boxes[:, 1] + 1.0) + else: + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +def intersection(boxes1, boxes2, add1=False): + """Compute pairwise intersection areas between boxes. + + Args: + boxes1: a numpy array with shape [N, 4] holding N boxes + boxes2: a numpy array with shape [M, 4] holding M boxes + + Returns: + a numpy array with shape [N*M] representing pairwise intersection area + """ + [y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1) + [y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1) + + all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2)) + all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2)) + if add1: + all_pairs_min_ymax += 1.0 + intersect_heights = np.maximum( + np.zeros(all_pairs_max_ymin.shape), + all_pairs_min_ymax - all_pairs_max_ymin) + + all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2)) + all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2)) + if add1: + all_pairs_min_xmax += 1.0 + intersect_widths = np.maximum( + np.zeros(all_pairs_max_xmin.shape), + all_pairs_min_xmax - all_pairs_max_xmin) + return intersect_heights * intersect_widths + + +def iou(boxes1, boxes2, add1=False): + """Computes pairwise intersection-over-union between box collections. + + Args: + boxes1: a numpy array with shape [N, 4] holding N boxes. + boxes2: a numpy array with shape [M, 4] holding N boxes. + + Returns: + a numpy array with shape [N, M] representing pairwise iou scores. + """ + intersect = intersection(boxes1, boxes2, add1) + area1 = area(boxes1, add1) + area2 = area(boxes2, add1) + union = np.expand_dims( + area1, axis=1) + np.expand_dims( + area2, axis=0) - intersect + return intersect / union diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py new file mode 100644 index 000000000..543d8f26f --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_object_eval_python/rotate_iou.py @@ -0,0 +1,330 @@ +##################### +# Based on https://github.com/hongzhenwang/RRPN-revise +# Licensed under The MIT License +# Author: yanyan, scrin@foxmail.com +##################### +import math + +import numba +import numpy as np +from numba import cuda + + +@numba.jit(nopython=True) +def div_up(m, n): + return m // n + (m % n > 0) + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def trangle_area(a, b, c): + return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * + (b[0] - c[0])) / 2.0 + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def area(int_pts, num_of_inter): + area_val = 0.0 + for i in range(num_of_inter - 2): + area_val += abs( + trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4], + int_pts[2 * i + 4:2 * i + 6])) + return area_val + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def sort_vertex_in_convex_polygon(int_pts, num_of_inter): + if num_of_inter > 0: + center = cuda.local.array((2, ), dtype=numba.float32) + center[:] = 0.0 + for i in range(num_of_inter): + center[0] += int_pts[2 * i] + center[1] += int_pts[2 * i + 1] + center[0] /= num_of_inter + center[1] /= num_of_inter + v = cuda.local.array((2, ), dtype=numba.float32) + vs = cuda.local.array((16, ), dtype=numba.float32) + for i in 
range(num_of_inter): + v[0] = int_pts[2 * i] - center[0] + v[1] = int_pts[2 * i + 1] - center[1] + d = math.sqrt(v[0] * v[0] + v[1] * v[1]) + v[0] = v[0] / d + v[1] = v[1] / d + if v[1] < 0: + v[0] = -2 - v[0] + vs[i] = v[0] + j = 0 + temp = 0 + for i in range(1, num_of_inter): + if vs[i - 1] > vs[i]: + temp = vs[i] + tx = int_pts[2 * i] + ty = int_pts[2 * i + 1] + j = i + while j > 0 and vs[j - 1] > temp: + vs[j] = vs[j - 1] + int_pts[j * 2] = int_pts[j * 2 - 2] + int_pts[j * 2 + 1] = int_pts[j * 2 - 1] + j -= 1 + + vs[j] = temp + int_pts[j * 2] = tx + int_pts[j * 2 + 1] = ty + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection(pts1, pts2, i, j, temp_pts): + A = cuda.local.array((2, ), dtype=numba.float32) + B = cuda.local.array((2, ), dtype=numba.float32) + C = cuda.local.array((2, ), dtype=numba.float32) + D = cuda.local.array((2, ), dtype=numba.float32) + + A[0] = pts1[2 * i] + A[1] = pts1[2 * i + 1] + + B[0] = pts1[2 * ((i + 1) % 4)] + B[1] = pts1[2 * ((i + 1) % 4) + 1] + + C[0] = pts2[2 * j] + C[1] = pts2[2 * j + 1] + + D[0] = pts2[2 * ((j + 1) % 4)] + D[1] = pts2[2 * ((j + 1) % 4) + 1] + BA0 = B[0] - A[0] + BA1 = B[1] - A[1] + DA0 = D[0] - A[0] + CA0 = C[0] - A[0] + DA1 = D[1] - A[1] + CA1 = C[1] - A[1] + acd = DA1 * CA0 > CA1 * DA0 + bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) + if acd != bcd: + abc = CA1 * BA0 > BA1 * CA0 + abd = DA1 * BA0 > BA1 * DA0 + if abc != abd: + DC0 = D[0] - C[0] + DC1 = D[1] - C[1] + ABBA = A[0] * B[1] - B[0] * A[1] + CDDC = C[0] * D[1] - D[0] * C[1] + DH = BA1 * DC0 - BA0 * DC1 + Dx = ABBA * DC0 - BA0 * CDDC + Dy = ABBA * DC1 - BA1 * CDDC + temp_pts[0] = Dx / DH + temp_pts[1] = Dy / DH + return True + return False + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): + a = cuda.local.array((2, ), dtype=numba.float32) + b = cuda.local.array((2, ), dtype=numba.float32) + c = cuda.local.array((2, ), dtype=numba.float32) + d = cuda.local.array((2, ), dtype=numba.float32) + + a[0] = pts1[2 * i] + a[1] = pts1[2 * i + 1] + + b[0] = pts1[2 * ((i + 1) % 4)] + b[1] = pts1[2 * ((i + 1) % 4) + 1] + + c[0] = pts2[2 * j] + c[1] = pts2[2 * j + 1] + + d[0] = pts2[2 * ((j + 1) % 4)] + d[1] = pts2[2 * ((j + 1) % 4) + 1] + + area_abc = trangle_area(a, b, c) + area_abd = trangle_area(a, b, d) + + if area_abc * area_abd >= 0: + return False + + area_cda = trangle_area(c, d, a) + area_cdb = area_cda + area_abc - area_abd + + if area_cda * area_cdb >= 0: + return False + t = area_cda / (area_abd - area_abc) + + dx = t * (b[0] - a[0]) + dy = t * (b[1] - a[1]) + temp_pts[0] = a[0] + dx + temp_pts[1] = a[1] + dy + return True + + +@cuda.jit('(float32, float32, float32[:])', device=True, inline=True) +def point_in_quadrilateral(pt_x, pt_y, corners): + ab0 = corners[2] - corners[0] + ab1 = corners[3] - corners[1] + + ad0 = corners[6] - corners[0] + ad1 = corners[7] - corners[1] + + ap0 = pt_x - corners[0] + ap1 = pt_y - corners[1] + + abab = ab0 * ab0 + ab1 * ab1 + abap = ab0 * ap0 + ab1 * ap1 + adad = ad0 * ad0 + ad1 * ad1 + adap = ad0 * ap0 + ad1 * ap1 + + return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 + + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def quadrilateral_intersection(pts1, pts2, int_pts): + num_of_inter = 0 + for i in range(4): + if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): + 
int_pts[num_of_inter * 2] = pts1[2 * i] + int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] + num_of_inter += 1 + if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): + int_pts[num_of_inter * 2] = pts2[2 * i] + int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] + num_of_inter += 1 + temp_pts = cuda.local.array((2, ), dtype=numba.float32) + for i in range(4): + for j in range(4): + has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) + if has_pts: + int_pts[num_of_inter * 2] = temp_pts[0] + int_pts[num_of_inter * 2 + 1] = temp_pts[1] + num_of_inter += 1 + + return num_of_inter + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def rbbox_to_corners(corners, rbbox): + # generate clockwise corners and rotate it clockwise + angle = rbbox[4] + a_cos = math.cos(angle) + a_sin = math.sin(angle) + center_x = rbbox[0] + center_y = rbbox[1] + x_d = rbbox[2] + y_d = rbbox[3] + corners_x = cuda.local.array((4, ), dtype=numba.float32) + corners_y = cuda.local.array((4, ), dtype=numba.float32) + corners_x[0] = -x_d / 2 + corners_x[1] = -x_d / 2 + corners_x[2] = x_d / 2 + corners_x[3] = x_d / 2 + corners_y[0] = -y_d / 2 + corners_y[1] = y_d / 2 + corners_y[2] = y_d / 2 + corners_y[3] = -y_d / 2 + for i in range(4): + corners[2 * + i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x + corners[2 * i + + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def inter(rbbox1, rbbox2): + corners1 = cuda.local.array((8, ), dtype=numba.float32) + corners2 = cuda.local.array((8, ), dtype=numba.float32) + intersection_corners = cuda.local.array((16, ), dtype=numba.float32) + + rbbox_to_corners(corners1, rbbox1) + rbbox_to_corners(corners2, rbbox2) + + num_intersection = quadrilateral_intersection(corners1, corners2, + intersection_corners) + sort_vertex_in_convex_polygon(intersection_corners, num_intersection) + # print(intersection_corners.reshape([-1, 2])[:num_intersection]) + + return area(intersection_corners, num_intersection) + + +@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True) +def devRotateIoUEval(rbox1, rbox2, criterion=-1): + area1 = rbox1[2] * rbox1[3] + area2 = rbox2[2] * rbox2[3] + area_inter = inter(rbox1, rbox2) + if criterion == -1: + return area_inter / (area1 + area2 - area_inter) + elif criterion == 0: + return area_inter / area1 + elif criterion == 1: + return area_inter / area2 + else: + return area_inter + +@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False) +def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.x + col_start = cuda.blockIdx.y + tx = cuda.threadIdx.x + row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32) + + dev_query_box_idx = threadsPerBlock * col_start + tx + dev_box_idx = threadsPerBlock * row_start + tx + if (tx < col_size): + block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] + block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] + block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] + block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] + block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] + if (tx < 
row_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + cuda.syncthreads() + if tx < row_size: + for i in range(col_size): + offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i + dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], + block_boxes[tx * 5:tx * 5 + 5], criterion) + + +def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): + """rotated box iou running in gpu. 500x faster than cpu version + (take 5ms in one example with numba.cuda code). + convert from [this project]( + https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation). + + Args: + boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, + angles(clockwise when positive) + query_boxes (float tensor: [K, 5]): [description] + device_id (int, optional): Defaults to 0. [description] + + Returns: + [type]: [description] + """ + box_dtype = boxes.dtype + boxes = boxes.astype(np.float32) + query_boxes = query_boxes.astype(np.float32) + N = boxes.shape[0] + K = query_boxes.shape[0] + iou = np.zeros((N, K), dtype=np.float32) + if N == 0 or K == 0: + return iou + threadsPerBlock = 8 * 8 + cuda.select_device(device_id) + blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) + + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) + query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) + iou_dev = cuda.to_device(iou.reshape([-1]), stream) + rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream]( + N, K, boxes_dev, query_boxes_dev, iou_dev, criterion) + iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) + return iou.astype(boxes.dtype) diff --git a/toolbox/openpcdet/pcdet/datasets/kitti/kitti_utils.py b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_utils.py new file mode 100644 index 000000000..62b487806 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/kitti/kitti_utils.py @@ -0,0 +1,66 @@ +import numpy as np +from ...utils import box_utils + + +def transform_annotations_to_kitti_format(annos, map_name_to_kitti=None, info_with_fakelidar=False): + """ + Args: + annos: + map_name_to_kitti: dict, map name to KITTI names (Car, Pedestrian, Cyclist) + info_with_fakelidar: + Returns: + + """ + for anno in annos: + # For lyft and nuscenes, different anno key in info + if 'name' not in anno: + anno['name'] = anno['gt_names'] + anno.pop('gt_names') + + for k in range(anno['name'].shape[0]): + anno['name'][k] = map_name_to_kitti[anno['name'][k]] + + anno['bbox'] = np.zeros((len(anno['name']), 4)) + anno['bbox'][:, 2:4] = 50 # [0, 0, 50, 50] + anno['truncated'] = np.zeros(len(anno['name'])) + anno['occluded'] = np.zeros(len(anno['name'])) + if 'boxes_lidar' in anno: + gt_boxes_lidar = anno['boxes_lidar'].copy() + else: + gt_boxes_lidar = anno['gt_boxes_lidar'].copy() + + if len(gt_boxes_lidar) > 0: + if info_with_fakelidar: + gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(gt_boxes_lidar) + + gt_boxes_lidar[:, 2] -= gt_boxes_lidar[:, 5] / 2 + anno['location'] = np.zeros((gt_boxes_lidar.shape[0], 3)) + anno['location'][:, 0] = -gt_boxes_lidar[:, 1] # x = -y_lidar + anno['location'][:, 1] = -gt_boxes_lidar[:, 2] # y = -z_lidar + anno['location'][:, 2] = gt_boxes_lidar[:, 0] # z = x_lidar + dxdydz = 
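A minimal usage sketch for rotate_iou_gpu_eval as defined above (assumes a CUDA-capable device; boxes are [x_center, y_center, dx, dy, angle] in bird's-eye view):

import numpy as np
# from pcdet.datasets.kitti.kitti_object_eval_python.rotate_iou import rotate_iou_gpu_eval

boxes = np.array([[0., 0., 4., 2., 0.]], dtype=np.float32)        # N = 1
query_boxes = np.array([[0., 0., 4., 2., 0.],
                        [2., 0., 4., 2., 0.]], dtype=np.float32)  # K = 2
# iou = rotate_iou_gpu_eval(boxes, query_boxes)   # -> shape (1, 2)
# Expected: iou[0, 0] == 1.0 (identical boxes) and iou[0, 1] == 1/3
# (overlap 2 x 2 = 4 m^2, union 8 + 8 - 4 = 12 m^2 for the half-shifted box).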
gt_boxes_lidar[:, 3:6] + anno['dimensions'] = dxdydz[:, [0, 2, 1]] # lwh ==> lhw + anno['rotation_y'] = -gt_boxes_lidar[:, 6] - np.pi / 2.0 + anno['alpha'] = -np.arctan2(-gt_boxes_lidar[:, 1], gt_boxes_lidar[:, 0]) + anno['rotation_y'] + else: + anno['location'] = anno['dimensions'] = np.zeros((0, 3)) + anno['rotation_y'] = anno['alpha'] = np.zeros(0) + + return annos + + +def calib_to_matricies(calib): + """ + Converts calibration object to transformation matricies + Args: + calib: calibration.Calibration, Calibration object + Returns + V2R: (4, 4), Lidar to rectified camera transformation matrix + P2: (3, 4), Camera projection matrix + """ + V2C = np.vstack((calib.V2C, np.array([0, 0, 0, 1], dtype=np.float32))) # (4, 4) + R0 = np.hstack((calib.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4) + R0 = np.vstack((R0, np.array([0, 0, 0, 1], dtype=np.float32))) # (4, 4) + V2R = R0 @ V2C + P2 = calib.P2 + return V2R, P2 \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/lyft/__init__.py b/toolbox/openpcdet/pcdet/datasets/lyft/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/lyft/lyft_dataset.py b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_dataset.py new file mode 100644 index 000000000..4fd197acd --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_dataset.py @@ -0,0 +1,303 @@ +import copy +import pickle +from pathlib import Path + +import numpy as np +from tqdm import tqdm + +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import common_utils, box_utils +from ..dataset import DatasetTemplate + + +class LyftDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + self.root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=self.root_path, logger=logger + ) + self.infos = [] + self.include_lyft_data(self.mode) + + def include_lyft_data(self, mode): + self.logger.info('Loading lyft dataset') + lyft_infos = [] + + for info_path in self.dataset_cfg.INFO_PATH[mode]: + info_path = self.root_path / info_path + if not info_path.exists(): + continue + with open(info_path, 'rb') as f: + infos = pickle.load(f) + lyft_infos.extend(infos) + + self.infos.extend(lyft_infos) + self.logger.info('Total samples for lyft dataset: %d' % (len(lyft_infos))) + + @staticmethod + def remove_ego_points(points, center_radius=1.0): + mask = ~((np.abs(points[:, 0]) < center_radius*1.5) & (np.abs(points[:, 1]) < center_radius)) + return points[mask] + + def get_sweep(self, sweep_info): + lidar_path = self.root_path / sweep_info['lidar_path'] + points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1) + if points_sweep.shape[0] % 5 != 0: + points_sweep = points_sweep[: points_sweep.shape[0] - (points_sweep.shape[0] % 5)] + points_sweep = points_sweep.reshape([-1, 5])[:, :4] + + points_sweep = self.remove_ego_points(points_sweep).T + if sweep_info['transform_matrix'] is not None: + num_points = points_sweep.shape[1] + points_sweep[:3, :] = sweep_info['transform_matrix'].dot( + np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :] + + cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1])) + return points_sweep.T, cur_times.T + + def get_lidar_with_sweeps(self, index, max_sweeps=1): + info = self.infos[index] + lidar_path = self.root_path / info['lidar_path'] + points 
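To make the lidar-to-KITTI-camera convention above concrete, here is the same mapping applied to one hand-written box (a standalone sketch; box layout [x, y, z, dx, dy, dz, heading] in lidar coordinates):

import numpy as np

box = np.array([[10.0, 2.0, -0.5, 4.0, 1.8, 1.6, 0.0]])  # x y z l w h yaw (lidar)
z_bottom = box[:, 2] - box[:, 5] / 2          # KITTI uses the bottom-face center
location = np.stack([-box[:, 1], -z_bottom, box[:, 0]], axis=1)
dimensions = box[:, 3:6][:, [0, 2, 1]]        # (l, w, h) -> (l, h, w)
rotation_y = -box[:, 6] - np.pi / 2
print(location, dimensions, rotation_y)
# [[-2.   1.3 10. ]] [[4.  1.6 1.8]] [-1.5707963...]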
= np.fromfile(str(lidar_path), dtype=np.float32, count=-1)
+        if points.shape[0] % 5 != 0:
+            points = points[: points.shape[0] - (points.shape[0] % 5)]
+        points = points.reshape([-1, 5])[:, :4]
+
+        sweep_points_list = [points]
+        sweep_times_list = [np.zeros((points.shape[0], 1))]
+
+        for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
+            points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
+            sweep_points_list.append(points_sweep)
+            sweep_times_list.append(times_sweep)
+
+        points = np.concatenate(sweep_points_list, axis=0)
+        times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype)
+
+        points = np.concatenate((points, times), axis=1)
+        return points
+
+    def __len__(self):
+        if self._merge_all_iters_to_one_epoch:
+            return len(self.infos) * self.total_epochs
+
+        return len(self.infos)
+
+    def __getitem__(self, index):
+        if self._merge_all_iters_to_one_epoch:
+            index = index % len(self.infos)
+
+        info = copy.deepcopy(self.infos[index])
+        points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS)
+
+        input_dict = {
+            'points': points,
+            'frame_id': Path(info['lidar_path']).stem,
+            'metadata': {'token': info['token']}
+        }
+
+        if 'gt_boxes' in info:
+            input_dict.update({
+                'gt_boxes': info['gt_boxes'],
+                'gt_names': info['gt_names']
+            })
+
+        data_dict = self.prepare_data(data_dict=input_dict)
+
+        return data_dict
+
+    def kitti_eval(self, eval_det_annos, eval_gt_annos, class_names):
+        from ..kitti.kitti_object_eval_python import eval as kitti_eval
+        from ..kitti import kitti_utils
+
+        map_name_to_kitti = {
+            'car': 'Car',
+            'pedestrian': 'Pedestrian',
+            'truck': 'Truck',
+            'bicycle': 'Cyclist',
+            'motorcycle': 'Cyclist'
+        }
+
+        kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
+        kitti_utils.transform_annotations_to_kitti_format(
+            eval_gt_annos, map_name_to_kitti=map_name_to_kitti,
+            info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False)
+        )
+
+        kitti_class_names = [map_name_to_kitti[x] for x in class_names]
+
+        ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
+            gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names
+        )
+        return ap_result_str, ap_dict
+
+    def evaluation(self, det_annos, class_names, **kwargs):
+        if kwargs['eval_metric'] == 'kitti':
+            eval_det_annos = copy.deepcopy(det_annos)
+            eval_gt_annos = copy.deepcopy(self.infos)
+            return self.kitti_eval(eval_det_annos, eval_gt_annos, class_names)
+        elif kwargs['eval_metric'] == 'lyft':
+            return self.lyft_eval(det_annos, class_names,
+                                  iou_thresholds=self.dataset_cfg.EVAL_LYFT_IOU_LIST)
+        else:
+            raise NotImplementedError
+
+    def lyft_eval(self, det_annos, class_names, iou_thresholds=[0.5]):
+        from lyft_dataset_sdk.lyftdataset import LyftDataset as Lyft
+        from .
import lyft_utils + # from lyft_dataset_sdk.eval.detection.mAP_evaluation import get_average_precisions + from .lyft_mAP_eval.lyft_eval import get_average_precisions + + lyft = Lyft(json_path=self.root_path / 'data', data_path=self.root_path, verbose=True) + + det_lyft_boxes, sample_tokens = lyft_utils.convert_det_to_lyft_format(lyft, det_annos) + gt_lyft_boxes = lyft_utils.load_lyft_gt_by_tokens(lyft, sample_tokens) + + average_precisions = get_average_precisions(gt_lyft_boxes, det_lyft_boxes, class_names, iou_thresholds) + + ap_result_str, ap_dict = lyft_utils.format_lyft_results(average_precisions, class_names, iou_thresholds, version=self.dataset_cfg.VERSION) + + return ap_result_str, ap_dict + + def create_groundtruth_database(self, used_classes=None, max_sweeps=10): + import torch + + database_save_path = self.root_path / f'gt_database' + db_info_save_path = self.root_path / f'lyft_dbinfos_{max_sweeps}sweeps.pkl' + + database_save_path.mkdir(parents=True, exist_ok=True) + all_db_infos = {} + + for idx in tqdm(range(len(self.infos))): + sample_idx = idx + info = self.infos[idx] + points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps) + gt_boxes = info['gt_boxes'] + gt_names = info['gt_names'] + + box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu( + torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), + torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda() + ).long().squeeze(dim=0).cpu().numpy() + + for i in range(gt_boxes.shape[0]): + filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i) + filepath = database_save_path / filename + gt_points = points[box_idxs_of_pts == i] + + gt_points[:, :3] -= gt_boxes[i, :3] + with open(filepath, 'w') as f: + gt_points.tofile(f) + + if (used_classes is None) or gt_names[i] in used_classes: + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i, + 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]} + if gt_names[i] in all_db_infos: + all_db_infos[gt_names[i]].append(db_info) + else: + all_db_infos[gt_names[i]] = [db_info] + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + +def create_lyft_info(version, data_path, save_path, split, max_sweeps=10): + from lyft_dataset_sdk.lyftdataset import LyftDataset + from . 
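Reading one entry back out of the database written above (a sketch; paths assume the dataset root as the working directory, and the 5 point features are x/y/z/intensity/timestamp as produced by get_lidar_with_sweeps):

import pickle
import numpy as np

with open('lyft_dbinfos_10sweeps.pkl', 'rb') as f:   # illustrative file name
    all_db_infos = pickle.load(f)

db_info = all_db_infos['car'][0]
gt_points = np.fromfile(db_info['path'], dtype=np.float32).reshape(-1, 5)
# Points were stored relative to the box center, so add it back
# to recover lidar-frame coordinates:
gt_points[:, :3] += db_info['box3d_lidar'][:3]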
import lyft_utils + data_path = data_path / version + save_path = save_path / version + split_path = data_path.parent / 'ImageSets' + + if split is not None: + save_path = save_path / split + split_path = split_path / split + + save_path.mkdir(exist_ok=True) + + assert version in ['trainval', 'one_scene', 'test'] + + if version == 'trainval': + train_split_path = split_path / 'train.txt' + val_split_path = split_path / 'val.txt' + elif version == 'test': + train_split_path = split_path / 'test.txt' + val_split_path = None + elif version == 'one_scene': + train_split_path = split_path / 'one_scene.txt' + val_split_path = split_path / 'one_scene.txt' + else: + raise NotImplementedError + + train_scenes = [x.strip() for x in open(train_split_path).readlines()] if train_split_path.exists() else [] + val_scenes = [x.strip() for x in open(val_split_path).readlines()] if val_split_path is not None and val_split_path.exists() else [] + + lyft = LyftDataset(json_path=data_path / 'data', data_path=data_path, verbose=True) + + available_scenes = lyft_utils.get_available_scenes(lyft) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes]) + val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes]) + + print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes))) + + train_lyft_infos, val_lyft_infos = lyft_utils.fill_trainval_infos( + data_path=data_path, lyft=lyft, train_scenes=train_scenes, val_scenes=val_scenes, + test='test' in version, max_sweeps=max_sweeps + ) + + if version == 'test': + print('test sample: %d' % len(train_lyft_infos)) + with open(save_path / f'lyft_infos_test.pkl', 'wb') as f: + pickle.dump(train_lyft_infos, f) + else: + print('train sample: %d, val sample: %d' % (len(train_lyft_infos), len(val_lyft_infos))) + with open(save_path / f'lyft_infos_train.pkl', 'wb') as f: + pickle.dump(train_lyft_infos, f) + with open(save_path / f'lyft_infos_val.pkl', 'wb') as f: + pickle.dump(val_lyft_infos, f) + + +if __name__ == '__main__': + import yaml + import argparse + from pathlib import Path + from easydict import EasyDict + + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset') + parser.add_argument('--func', type=str, default='create_lyft_infos', help='') + parser.add_argument('--version', type=str, default='trainval', help='') + parser.add_argument('--split', type=str, default=None, help='') + parser.add_argument('--max_sweeps', type=int, default=10, help='') + args = parser.parse_args() + + if args.func == 'create_lyft_infos': + try: + yaml_config = yaml.safe_load(open(args.cfg_file), Loader=yaml.FullLoader) + except: + yaml_config = yaml.safe_load(open(args.cfg_file)) + dataset_cfg = EasyDict(yaml_config) + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + dataset_cfg.VERSION = args.version + dataset_cfg.MAX_SWEEPS = args.max_sweeps + create_lyft_info( + version=dataset_cfg.VERSION, + data_path=ROOT_DIR / 'data' / 'lyft', + save_path=ROOT_DIR / 'data' / 'lyft', + split=args.split, + max_sweeps=dataset_cfg.MAX_SWEEPS + ) + + lyft_dataset = LyftDataset( + dataset_cfg=dataset_cfg, class_names=None, + root_path=ROOT_DIR / 'data' 
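For reference, the __main__ entry point above is meant to be run as a module from the repository root; an illustrative invocation (the cfg path is an assumption about the repo layout, the flags come from the argparse definitions above):

# python -m pcdet.datasets.lyft.lyft_dataset --func create_lyft_infos \
#     --cfg_file tools/cfgs/dataset_configs/lyft_dataset.yaml \
#     --version trainval --max_sweeps 10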
/ 'lyft',
+        logger=common_utils.create_logger(), training=True
+    )
+
+    if args.version != 'test':
+        lyft_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS)
diff --git a/toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/__init__.py b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/lyft_eval.py b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/lyft_eval.py
new file mode 100644
index 000000000..872476ae3
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_mAP_eval/lyft_eval.py
@@ -0,0 +1,435 @@
+"""
+modified from the lyft toolkit https://github.com/lyft/nuscenes-devkit.git
+"""
+
+"""
+mAP 3D calculation for data in the nuScenes format.
+
+The input files are expected to have the following fields:
+
+gt = [{
+    'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
+    'translation': [974.2811881299899, 1714.6815014457964, -23.689857123368846],
+    'size': [1.796, 4.488, 1.664],
+    'rotation': [0.14882026466054782, 0, 0, 0.9888642620837121],
+    'name': 'car'
+}]
+
+prediction_result = {
+    'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
+    'translation': [971.8343488872263, 1713.6816097857359, -25.82534357061308],
+    'size': [2.519726579986132, 7.810161372666739, 3.483438286096803],
+    'rotation': [0.10913582721095375, 0.04099572636992043, 0.01927712319721745, 1.029328402625659],
+    'name': 'car',
+    'score': 0.3077029437237213
+}
+
+Input arguments:
+
+--pred_file: file with predictions
+--gt_file: ground truth file
+--iou_threshold: IOU threshold
+
+In general we are interested in the average of mAP at thresholds [0.5, 0.55, 0.6, 0.65, ..., 0.95], similar to the
+standard COCO protocol => one needs to run this file N times, once for every IOU threshold independently.
+"""
+
+import argparse
+import json
+from collections import defaultdict
+from pathlib import Path
+
+import numpy as np
+from pyquaternion import Quaternion
+from shapely.geometry import Polygon
+
+
+class Box3D:
+    """Data class used during detection evaluation. Can be a prediction or ground truth."""
+
+    def __init__(self, **kwargs):
+        sample_token = kwargs["sample_token"]
+        translation = kwargs["translation"]
+        size = kwargs["size"]
+        rotation = kwargs["rotation"]
+        name = kwargs["name"]
+        score = kwargs.get("score", -1)
+
+        if not isinstance(sample_token, str):
+            raise TypeError("Sample_token must be a string!")
+
+        if not len(translation) == 3:
+            raise ValueError("Translation must have 3 elements!")
+
+        if np.any(np.isnan(translation)):
+            raise ValueError("Translation may not be NaN!")
+
+        if not len(size) == 3:
+            raise ValueError("Size must have 3 elements!")
+
+        if np.any(np.isnan(size)):
+            raise ValueError("Size may not be NaN!")
+
+        if not len(rotation) == 4:
+            raise ValueError("Rotation must have 4 elements!")
+
+        if np.any(np.isnan(rotation)):
+            raise ValueError("Rotation may not be NaN!")
+
+        if name is None:
+            raise ValueError("Name cannot be empty!")
+
+        # Assign.
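Constructing one of these boxes from the schema documented above (values copied from the gt example in the module docstring; score is omitted, so it defaults to -1 as for ground truth):

box = Box3D(
    sample_token='0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac',
    translation=[974.2811881299899, 1714.6815014457964, -23.689857123368846],
    size=[1.796, 4.488, 1.664],   # width, length, height
    rotation=[0.14882026466054782, 0, 0, 0.9888642620837121],
    name='car',
)
# The constructor validates lengths and NaNs, then caches the volume,
# z-extent and ground-plane rectangle used by the IoU computation below.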
+ self.sample_token = sample_token + self.translation = translation + self.size = size + self.volume = np.prod(self.size) + self.score = score + + assert np.all([x > 0 for x in size]) + self.rotation = rotation + self.name = name + self.quaternion = Quaternion(self.rotation) + + self.width, self.length, self.height = size + + self.center_x, self.center_y, self.center_z = self.translation + + self.min_z = self.center_z - self.height / 2 + self.max_z = self.center_z + self.height / 2 + + self.ground_bbox_coords = None + self.ground_bbox_coords = self.get_ground_bbox_coords() + + @staticmethod + def check_orthogonal(a, b, c): + """Check that vector (b - a) is orthogonal to the vector (c - a).""" + return np.isclose((b[0] - a[0]) * (c[0] - a[0]) + (b[1] - a[1]) * (c[1] - a[1]), 0) + + def get_ground_bbox_coords(self): + if self.ground_bbox_coords is not None: + return self.ground_bbox_coords + return self.calculate_ground_bbox_coords() + + def calculate_ground_bbox_coords(self): + """We assume that the 3D box has lower plane parallel to the ground. + + Returns: Polygon with 4 points describing the base. + + """ + if self.ground_bbox_coords is not None: + return self.ground_bbox_coords + + rotation_matrix = self.quaternion.rotation_matrix + + cos_angle = rotation_matrix[0, 0] + sin_angle = rotation_matrix[1, 0] + + point_0_x = self.center_x + self.length / 2 * cos_angle + self.width / 2 * sin_angle + point_0_y = self.center_y + self.length / 2 * sin_angle - self.width / 2 * cos_angle + + point_1_x = self.center_x + self.length / 2 * cos_angle - self.width / 2 * sin_angle + point_1_y = self.center_y + self.length / 2 * sin_angle + self.width / 2 * cos_angle + + point_2_x = self.center_x - self.length / 2 * cos_angle - self.width / 2 * sin_angle + point_2_y = self.center_y - self.length / 2 * sin_angle + self.width / 2 * cos_angle + + point_3_x = self.center_x - self.length / 2 * cos_angle + self.width / 2 * sin_angle + point_3_y = self.center_y - self.length / 2 * sin_angle - self.width / 2 * cos_angle + + point_0 = point_0_x, point_0_y + point_1 = point_1_x, point_1_y + point_2 = point_2_x, point_2_y + point_3 = point_3_x, point_3_y + + assert self.check_orthogonal(point_0, point_1, point_3) + assert self.check_orthogonal(point_1, point_0, point_2) + assert self.check_orthogonal(point_2, point_1, point_3) + assert self.check_orthogonal(point_3, point_0, point_2) + + self.ground_bbox_coords = Polygon( + [ + (point_0_x, point_0_y), + (point_1_x, point_1_y), + (point_2_x, point_2_y), + (point_3_x, point_3_y), + (point_0_x, point_0_y), + ] + ) + + return self.ground_bbox_coords + + def get_height_intersection(self, other): + min_z = max(other.min_z, self.min_z) + max_z = min(other.max_z, self.max_z) + + return max(0, max_z - min_z) + + def get_area_intersection(self, other) -> float: + result = self.ground_bbox_coords.intersection(other.ground_bbox_coords).area + + assert result <= self.width * self.length + + return result + + def get_intersection(self, other) -> float: + height_intersection = self.get_height_intersection(other) + + area_intersection = self.ground_bbox_coords.intersection(other.ground_bbox_coords).area + + return height_intersection * area_intersection + + def get_iou(self, other): + intersection = self.get_intersection(other) + union = self.volume + other.volume - intersection + + iou = np.clip(intersection / union, 0, 1) + + return iou + + def __repr__(self): + return str(self.serialize()) + + def serialize(self) -> dict: + """Returns: Serialized instance as dict.""" + + return 
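The IoU above factors into a 1-D height overlap times a 2-D ground-rectangle overlap. The same arithmetic on a toy pair of 2 x 2 x 2 boxes shifted by 1 m (a sketch using shapely, which this module already imports):

from shapely.geometry import Polygon

ground_a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
ground_b = Polygon([(1, 0), (3, 0), (3, 2), (1, 2)])
area_inter = ground_a.intersection(ground_b).area   # 2.0
height_inter = min(2.0, 2.0) - max(0.0, 0.0)        # 2.0 (same z-extent)
inter = area_inter * height_inter                   # 4.0
iou = inter / (8.0 + 8.0 - inter)                   # 1/3
print(iou)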
{ + "sample_token": self.sample_token, + "translation": self.translation, + "size": self.size, + "rotation": self.rotation, + "name": self.name, + "volume": self.volume, + "score": self.score, + } + + +def group_by_key(detections, key): + groups = defaultdict(list) + for detection in detections: + groups[detection[key]].append(detection) + return groups + + +def wrap_in_box(input): + result = {} + for key, value in input.items(): + result[key] = [Box3D(**x) for x in value] + + return result + + +def get_envelope(precisions): + """Compute the precision envelope. + + Args: + precisions: + + Returns: + + """ + for i in range(precisions.size - 1, 0, -1): + precisions[i - 1] = np.maximum(precisions[i - 1], precisions[i]) + return precisions + + +def get_ap(recalls, precisions): + """Calculate average precision. + + Args: + recalls: + precisions: Returns (float): average precision. + + Returns: + + """ + # correct AP calculation + # first append sentinel values at the end + recalls = np.concatenate(([0.0], recalls, [1.0])) + precisions = np.concatenate(([0.0], precisions, [0.0])) + + precisions = get_envelope(precisions) + + # to calculate area under PR curve, look for points where X axis (recall) changes value + i = np.where(recalls[1:] != recalls[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((recalls[i + 1] - recalls[i]) * precisions[i + 1]) + return ap + + +def get_ious(gt_boxes, predicted_box): + return [predicted_box.get_iou(x) for x in gt_boxes] + + +def recall_precision(gt, predictions, iou_threshold_list): + num_gts = len(gt) + + if num_gts == 0: + return -1, -1, -1 + + image_gts = group_by_key(gt, "sample_token") + image_gts = wrap_in_box(image_gts) + + sample_gt_checked = {sample_token: np.zeros((len(boxes), len(iou_threshold_list))) for sample_token, boxes in image_gts.items()} + + predictions = sorted(predictions, key=lambda x: x["score"], reverse=True) + + # go down dets and mark TPs and FPs + num_predictions = len(predictions) + tp = np.zeros((num_predictions, len(iou_threshold_list))) + fp = np.zeros((num_predictions, len(iou_threshold_list))) + + for prediction_index, prediction in enumerate(predictions): + predicted_box = Box3D(**prediction) + + sample_token = prediction["sample_token"] + + max_overlap = -np.inf + jmax = -1 + + try: + gt_boxes = image_gts[sample_token] # gt_boxes per sample + gt_checked = sample_gt_checked[sample_token] # gt flags per sample + except KeyError: + gt_boxes = [] + gt_checked = None + + if len(gt_boxes) > 0: + overlaps = get_ious(gt_boxes, predicted_box) + + max_overlap = np.max(overlaps) + + jmax = np.argmax(overlaps) + + for i, iou_threshold in enumerate(iou_threshold_list): + if max_overlap > iou_threshold: + if gt_checked[jmax, i] == 0: + tp[prediction_index, i] = 1.0 + gt_checked[jmax, i] = 1 + else: + fp[prediction_index, i] = 1.0 + else: + fp[prediction_index, i] = 1.0 + + # compute precision recall + fp = np.cumsum(fp, axis=0) + tp = np.cumsum(tp, axis=0) + + recalls = tp / float(num_gts) + + assert np.all(0 <= recalls) & np.all(recalls <= 1) + + # avoid divide by zero in case the first detection matches a difficult ground truth + precisions = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + + assert np.all(0 <= precisions) & np.all(precisions <= 1) + + ap_list = [] + for i in range(len(iou_threshold_list)): + recall = recalls[:, i] + precision = precisions[:, i] + ap = get_ap(recall, precision) + ap_list.append(ap) + + return recalls, precisions, ap_list + + +def get_average_precisions(gt: list, predictions: list, 
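A quick numeric check of get_envelope/get_ap as defined above (illustrative values; run alongside the functions in this module):

import numpy as np

recalls = np.array([0.5])     # e.g. one true positive out of two ground truths
precisions = np.array([1.0])
# The sentinels pad this to r = [0, 0.5, 1], p = [0, 1, 0]; the envelope
# lifts p to [1, 1, 0], so AP = 0.5 * 1.0 + 0.5 * 0.0 = 0.5.
# print(get_ap(recalls, precisions))   # 0.5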
class_names: list, iou_thresholds: list) -> np.array: + """Returns an array with an average precision per class. + + + Args: + gt: list of dictionaries in the format described below. + predictions: list of dictionaries in the format described below. + class_names: list of the class names. + iou_threshold: list of IOU thresholds used to calculate TP / FN + + Returns an array with an average precision per class. + + + Ground truth and predictions should have schema: + + gt = [{ + 'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac', + 'translation': [974.2811881299899, 1714.6815014457964, -23.689857123368846], + 'size': [1.796, 4.488, 1.664], + 'rotation': [0.14882026466054782, 0, 0, 0.9888642620837121], + 'name': 'car' + }] + + predictions = [{ + 'sample_token': '0f0e3ce89d2324d8b45aa55a7b4f8207fbb039a550991a5149214f98cec136ac', + 'translation': [971.8343488872263, 1713.6816097857359, -25.82534357061308], + 'size': [2.519726579986132, 7.810161372666739, 3.483438286096803], + 'rotation': [0.10913582721095375, 0.04099572636992043, 0.01927712319721745, 1.029328402625659], + 'name': 'car', + 'score': 0.3077029437237213 + }] + + """ + assert all([0 <= iou_th <= 1 for iou_th in iou_thresholds]) + + gt_by_class_name = group_by_key(gt, "name") + pred_by_class_name = group_by_key(predictions, "name") + + average_precisions = np.zeros(len(class_names)) + + for class_id, class_name in enumerate(class_names): + if class_name in pred_by_class_name: + recalls, precisions, ap_list = recall_precision( + gt_by_class_name[class_name], pred_by_class_name[class_name], iou_thresholds + ) + aps = np.mean(ap_list) + average_precisions[class_id] = aps + + return average_precisions + + +def get_class_names(gt: dict) -> list: + """Get sorted list of class names. + + Args: + gt: + + Returns: Sorted list of class names. 
+ + """ + return sorted(list(set([x["name"] for x in gt]))) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + arg = parser.add_argument + arg("-p", "--pred_file", type=str, help="Path to the predictions file.", required=True) + arg("-g", "--gt_file", type=str, help="Path to the ground truth file.", required=True) + arg("-t", "--iou_threshold", type=float, help="iou threshold", default=0.5) + + args = parser.parse_args() + + gt_path = Path(args.gt_file) + pred_path = Path(args.pred_file) + + with open(args.pred_file) as f: + predictions = json.load(f) + + with open(args.gt_file) as f: + gt = json.load(f) + + class_names = get_class_names(gt) + print("Class_names = ", class_names) + + average_precisions = get_average_precisions(gt, predictions, class_names, args.iou_threshold) + + mAP = np.mean(average_precisions) + print("Average per class mean average precision = ", mAP) + + for class_id in sorted(list(zip(class_names, average_precisions.flatten().tolist()))): + print(class_id) diff --git a/toolbox/openpcdet/pcdet/datasets/lyft/lyft_utils.py b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_utils.py new file mode 100644 index 000000000..30e057ef9 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/lyft/lyft_utils.py @@ -0,0 +1,332 @@ +""" +The Lyft data pre-processing and evaluation is modified from +https://github.com/poodarchu/Det3D +""" + +import operator +from functools import reduce +from pathlib import Path + +import numpy as np +import tqdm +from lyft_dataset_sdk.utils.data_classes import Box, Quaternion +from lyft_dataset_sdk.lyftdataset import LyftDataset +from lyft_dataset_sdk.utils.geometry_utils import transform_matrix +from lyft_dataset_sdk.eval.detection.mAP_evaluation import Box3D + + +def get_available_scenes(lyft): + available_scenes = [] + print('total scene num:', len(lyft.scene)) + for scene in lyft.scene: + scene_token = scene['token'] + scene_rec = lyft.get('scene', scene_token) + sample_rec = lyft.get('sample', scene_rec['first_sample_token']) + sd_rec = lyft.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = lyft.get_sample_data(sd_rec['token']) + if not Path(lidar_path).exists(): + scene_not_exist = True + break + else: + break + # if not sd_rec['next'] == '': + # sd_rec = nusc.get('sample_data', sd_rec['next']) + # else: + # has_more_frames = False + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num:', len(available_scenes)) + return available_scenes + + +def get_sample_data(lyft, sample_data_token): + sd_rec = lyft.get("sample_data", sample_data_token) + cs_rec = lyft.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + + sensor_rec = lyft.get("sensor", cs_rec["sensor_token"]) + pose_rec = lyft.get("ego_pose", sd_rec["ego_pose_token"]) + + boxes = lyft.get_boxes(sample_data_token) + + box_list = [] + for box in boxes: + box.translate(-np.array(pose_rec["translation"])) + box.rotate(Quaternion(pose_rec["rotation"]).inverse) + + box.translate(-np.array(cs_rec["translation"])) + box.rotate(Quaternion(cs_rec["rotation"]).inverse) + + box_list.append(box) + + return box_list, pose_rec + + +def quaternion_yaw(q: Quaternion) -> float: + """ + Calculate the yaw angle from a quaternion. + Note that this only works for a quaternion that represents a box in lidar or global coordinate frame. + It does not work for a box in the camera frame. + :param q: Quaternion of interest. + :return: Yaw angle in radians. 
+ """ + + # Project into xy plane. + v = np.dot(q.rotation_matrix, np.array([1, 0, 0])) + + # Measure yaw using arctan. + yaw = np.arctan2(v[1], v[0]) + + return yaw + + +def fill_trainval_infos(data_path, lyft, train_scenes, val_scenes, test=False, max_sweeps=10): + train_lyft_infos = [] + val_lyft_infos = [] + progress_bar = tqdm.tqdm(total=len(lyft.sample), desc='create_info', dynamic_ncols=True) + + # ref_chans = ["LIDAR_TOP", "LIDAR_FRONT_LEFT", "LIDAR_FRONT_RIGHT"] + ref_chan = "LIDAR_TOP" + + for index, sample in enumerate(lyft.sample): + progress_bar.update() + + ref_info = {} + ref_sd_token = sample["data"][ref_chan] + ref_sd_rec = lyft.get("sample_data", ref_sd_token) + ref_cs_token = ref_sd_rec["calibrated_sensor_token"] + ref_cs_rec = lyft.get("calibrated_sensor", ref_cs_token) + + ref_to_car = transform_matrix( + ref_cs_rec["translation"], + Quaternion(ref_cs_rec["rotation"]), + inverse=False, + ) + + ref_from_car = transform_matrix( + ref_cs_rec["translation"], + Quaternion(ref_cs_rec["rotation"]), + inverse=True, + ) + + ref_lidar_path = lyft.get_sample_data_path(ref_sd_token) + + ref_boxes, ref_pose_rec = get_sample_data(lyft, ref_sd_token) + ref_time = 1e-6 * ref_sd_rec["timestamp"] + car_from_global = transform_matrix( + ref_pose_rec["translation"], + Quaternion(ref_pose_rec["rotation"]), + inverse=True, + ) + + car_to_global = transform_matrix( + ref_pose_rec["translation"], + Quaternion(ref_pose_rec["rotation"]), + inverse=False, + ) + + info = { + "lidar_path": Path(ref_lidar_path).relative_to(data_path).__str__(), + "ref_from_car": ref_from_car, + "ref_to_car": ref_to_car, + 'token': sample['token'], + 'car_from_global': car_from_global, + 'car_to_global': car_to_global, + 'timestamp': ref_time, + 'sweeps': [] + } + + sample_data_token = sample['data'][ref_chan] + curr_sd_rec = lyft.get('sample_data', sample_data_token) + sweeps = [] + + while len(sweeps) < max_sweeps - 1: + if curr_sd_rec['prev'] == '': + if len(sweeps) == 0: + sweep = { + 'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(), + 'sample_data_token': curr_sd_rec['token'], + 'transform_matrix': None, + 'time_lag': curr_sd_rec['timestamp'] * 0, + } + sweeps.append(sweep) + else: + sweeps.append(sweeps[-1]) + else: + curr_sd_rec = lyft.get('sample_data', curr_sd_rec['prev']) + + # Get past pose + current_pose_rec = lyft.get('ego_pose', curr_sd_rec['ego_pose_token']) + global_from_car = transform_matrix( + current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False, + ) + + # Homogeneous transformation matrix from sensor coordinate frame to ego car frame. 
+ current_cs_rec = lyft.get( + 'calibrated_sensor', curr_sd_rec['calibrated_sensor_token'] + ) + car_from_current = transform_matrix( + current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False, + ) + + tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current]) + + lidar_path = lyft.get_sample_data_path(curr_sd_rec['token']) + + time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp'] + + sweep = { + 'lidar_path': Path(lidar_path).relative_to(data_path).__str__(), + 'sample_data_token': curr_sd_rec['token'], + 'transform_matrix': tm, + 'global_from_car': global_from_car, + 'car_from_current': car_from_current, + 'time_lag': time_lag, + } + sweeps.append(sweep) + + info['sweeps'] = sweeps + + if not test: + annotations = [ + lyft.get("sample_annotation", token) for token in sample["anns"] + ] + + locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]] + rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape( + -1, 1 + ) + velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3) + names = np.array([b.name for b in ref_boxes]) + tokens = np.array([b.token for b in ref_boxes]).reshape(-1, 1) + gt_boxes = np.concatenate([locs, dims, rots], axis=1) + + assert len(annotations) == len(gt_boxes) + + info["gt_boxes"] = gt_boxes + info["gt_boxes_velocity"] = velocity + info["gt_names"] = names + info["gt_boxes_token"] = tokens + + if sample["scene_token"] in train_scenes: + train_lyft_infos.append(info) + else: + val_lyft_infos.append(info) + + progress_bar.close() + return train_lyft_infos, val_lyft_infos + +def boxes_lidar_to_lyft(boxes3d, scores=None, labels=None): + box_list = [] + for k in range(boxes3d.shape[0]): + quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[k, 6]) + box = Box( + boxes3d[k, :3], + boxes3d[k, [4, 3, 5]], # wlh + quat, label=labels[k] if labels is not None else np.nan, + score=scores[k] if scores is not None else np.nan, + ) + box_list.append(box) + return box_list + + +def lidar_lyft_box_to_global(lyft, boxes, sample_token): + s_record = lyft.get('sample', sample_token) + sample_data_token = s_record['data']['LIDAR_TOP'] + + sd_record = lyft.get('sample_data', sample_data_token) + cs_record = lyft.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = lyft.get('sensor', cs_record['sensor_token']) + pose_record = lyft.get('ego_pose', sd_record['ego_pose_token']) + + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.rotate(Quaternion(cs_record['rotation'])) + box.translate(np.array(cs_record['translation'])) + # Move box to global coord system + box.rotate(Quaternion(pose_record['rotation'])) + box.translate(np.array(pose_record['translation'])) + box_list.append(box) + return box_list + + +def convert_det_to_lyft_format(lyft, det_annos): + sample_tokens = [] + det_lyft_box = [] + for anno in det_annos: + sample_tokens.append(anno['metadata']['token']) + + boxes_lyft_list = boxes_lidar_to_lyft(anno['boxes_lidar'], anno['score'], anno['pred_labels']) + boxes_list = lidar_lyft_box_to_global(lyft, boxes_lyft_list, anno['metadata']['token']) + + for idx, box in enumerate(boxes_list): + name = anno['name'][idx] + box3d = { + 'sample_token': anno['metadata']['token'], + 'translation': box.center.tolist(), + 'size': box.wlh.tolist(), + 'rotation': box.orientation.elements.tolist(), + 'name': name, + 'score': box.score + } + det_lyft_box.append(box3d) + + return 
det_lyft_box, sample_tokens + + +def load_lyft_gt_by_tokens(lyft, sample_tokens): + """ + Modify from Lyft tutorial + """ + + gt_box3ds = [] + + # Load annotations and filter predictions and annotations. + for sample_token in sample_tokens: + + sample = lyft.get('sample', sample_token) + + sample_annotation_tokens = sample['anns'] + + sample_lidar_token = sample["data"]["LIDAR_TOP"] + lidar_data = lyft.get("sample_data", sample_lidar_token) + ego_pose = lyft.get("ego_pose", lidar_data["ego_pose_token"]) + ego_translation = np.array(ego_pose['translation']) + + for sample_annotation_token in sample_annotation_tokens: + sample_annotation = lyft.get('sample_annotation', sample_annotation_token) + sample_annotation_translation = sample_annotation['translation'] + + class_name = sample_annotation['category_name'] + + box3d = { + 'sample_token': sample_token, + 'translation': sample_annotation_translation, + 'size': sample_annotation['size'], + 'rotation': sample_annotation['rotation'], + 'name': class_name + } + gt_box3ds.append(box3d) + + return gt_box3ds + + +def format_lyft_results(classwise_ap, class_names, iou_threshold_list, version='trainval'): + ret_dict = {} + result = '----------------Lyft %s results-----------------\n' % version + result += 'Average precision over IoUs: {}\n'.format(str(iou_threshold_list)) + for c_idx, class_name in enumerate(class_names): + result += '{:<20}: \t {:.4f}\n'.format(class_name, classwise_ap[c_idx]) + ret_dict[class_name] = classwise_ap[c_idx] + + result += '--------------average performance-------------\n' + mAP = np.mean(classwise_ap) + result += 'mAP:\t {:.4f}\n'.format(mAP) + + ret_dict['mAP'] = mAP + return result, ret_dict diff --git a/toolbox/openpcdet/pcdet/datasets/nuscenes/__init__.py b/toolbox/openpcdet/pcdet/datasets/nuscenes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_dataset.py b/toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_dataset.py new file mode 100644 index 000000000..0f7000562 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_dataset.py @@ -0,0 +1,434 @@ +import copy +import pickle +from pathlib import Path + +import numpy as np +from tqdm import tqdm + +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import common_utils +from ..dataset import DatasetTemplate +from pyquaternion import Quaternion +from PIL import Image + + +class NuScenesDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + root_path = (root_path if root_path is not None else Path(dataset_cfg.DATA_PATH)) / dataset_cfg.VERSION + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.infos = [] + self.camera_config = self.dataset_cfg.get('CAMERA_CONFIG', None) + if self.camera_config is not None: + self.use_camera = self.camera_config.get('USE_CAMERA', True) + self.camera_image_config = self.camera_config.IMAGE + else: + self.use_camera = False + + self.include_nuscenes_data(self.mode) + if self.training and self.dataset_cfg.get('BALANCED_RESAMPLING', False): + self.infos = self.balanced_infos_resampling(self.infos) + + def include_nuscenes_data(self, mode): + self.logger.info('Loading NuScenes dataset') + nuscenes_infos = [] + + for info_path in self.dataset_cfg.INFO_PATH[mode]: + info_path = self.root_path / info_path + if not info_path.exists(): + continue + with open(info_path, 'rb') as f: + 
infos = pickle.load(f) + nuscenes_infos.extend(infos) + + self.infos.extend(nuscenes_infos) + self.logger.info('Total samples for NuScenes dataset: %d' % (len(nuscenes_infos))) + + def balanced_infos_resampling(self, infos): + """ + Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492 + """ + if self.class_names is None: + return infos + + cls_infos = {name: [] for name in self.class_names} + for info in infos: + for name in set(info['gt_names']): + if name in self.class_names: + cls_infos[name].append(info) + + duplicated_samples = sum([len(v) for _, v in cls_infos.items()]) + cls_dist = {k: len(v) / duplicated_samples for k, v in cls_infos.items()} + + sampled_infos = [] + + frac = 1.0 / len(self.class_names) + ratios = [frac / v for v in cls_dist.values()] + + for cur_cls_infos, ratio in zip(list(cls_infos.values()), ratios): + sampled_infos += np.random.choice( + cur_cls_infos, int(len(cur_cls_infos) * ratio) + ).tolist() + self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos))) + + cls_infos_new = {name: [] for name in self.class_names} + for info in sampled_infos: + for name in set(info['gt_names']): + if name in self.class_names: + cls_infos_new[name].append(info) + + cls_dist_new = {k: len(v) / len(sampled_infos) for k, v in cls_infos_new.items()} + + return sampled_infos + + def get_sweep(self, sweep_info): + def remove_ego_points(points, center_radius=1.0): + mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius)) + return points[mask] + + lidar_path = self.root_path / sweep_info['lidar_path'] + points_sweep = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4] + points_sweep = remove_ego_points(points_sweep).T + if sweep_info['transform_matrix'] is not None: + num_points = points_sweep.shape[1] + points_sweep[:3, :] = sweep_info['transform_matrix'].dot( + np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :] + + cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1])) + return points_sweep.T, cur_times.T + + def get_lidar_with_sweeps(self, index, max_sweeps=1): + info = self.infos[index] + lidar_path = self.root_path / info['lidar_path'] + points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])[:, :4] + + sweep_points_list = [points] + sweep_times_list = [np.zeros((points.shape[0], 1))] + + for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False): + points_sweep, times_sweep = self.get_sweep(info['sweeps'][k]) + sweep_points_list.append(points_sweep) + sweep_times_list.append(times_sweep) + + points = np.concatenate(sweep_points_list, axis=0) + times = np.concatenate(sweep_times_list, axis=0).astype(points.dtype) + + points = np.concatenate((points, times), axis=1) + return points + + def crop_image(self, input_dict): + W, H = input_dict["ori_shape"] + imgs = input_dict["camera_imgs"] + img_process_infos = [] + crop_images = [] + for img in imgs: + if self.training == True: + fH, fW = self.camera_image_config.FINAL_DIM + resize_lim = self.camera_image_config.RESIZE_LIM_TRAIN + resize = np.random.uniform(*resize_lim) + resize_dims = (int(W * resize), int(H * resize)) + newW, newH = resize_dims + crop_h = newH - fH + crop_w = int(np.random.uniform(0, max(0, newW - fW))) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + else: + fH, fW = self.camera_image_config.FINAL_DIM + resize_lim = self.camera_image_config.RESIZE_LIM_TEST + resize = np.mean(resize_lim) + resize_dims = (int(W * 
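The resampling ratio used above is (1 / num_classes) / class_fraction, so under-represented classes are duplicated and over-represented ones subsampled. With two classes split 75/25 (illustrative counts):

cls_counts = {'car': 75, 'pedestrian': 25}
total = sum(cls_counts.values())
frac = 1.0 / len(cls_counts)
ratios = {k: frac / (v / total) for k, v in cls_counts.items()}
print(ratios)   # {'car': 0.666..., 'pedestrian': 2.0}
# Each class list is then resampled to int(len(infos) * ratio) entries.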
resize), int(H * resize)) + newW, newH = resize_dims + crop_h = newH - fH + crop_w = int(max(0, newW - fW) / 2) + crop = (crop_w, crop_h, crop_w + fW, crop_h + fH) + + # reisze and crop image + img = img.resize(resize_dims) + img = img.crop(crop) + crop_images.append(img) + img_process_infos.append([resize, crop, False, 0]) + + input_dict['img_process_infos'] = img_process_infos + input_dict['camera_imgs'] = crop_images + return input_dict + + def load_camera_info(self, input_dict, info): + input_dict["image_paths"] = [] + input_dict["lidar2camera"] = [] + input_dict["lidar2image"] = [] + input_dict["camera2ego"] = [] + input_dict["camera_intrinsics"] = [] + input_dict["camera2lidar"] = [] + + for _, camera_info in info["cams"].items(): + input_dict["image_paths"].append(camera_info["data_path"]) + + # lidar to camera transform + lidar2camera_r = np.linalg.inv(camera_info["sensor2lidar_rotation"]) + lidar2camera_t = ( + camera_info["sensor2lidar_translation"] @ lidar2camera_r.T + ) + lidar2camera_rt = np.eye(4).astype(np.float32) + lidar2camera_rt[:3, :3] = lidar2camera_r.T + lidar2camera_rt[3, :3] = -lidar2camera_t + input_dict["lidar2camera"].append(lidar2camera_rt.T) + + # camera intrinsics + camera_intrinsics = np.eye(4).astype(np.float32) + camera_intrinsics[:3, :3] = camera_info["camera_intrinsics"] + input_dict["camera_intrinsics"].append(camera_intrinsics) + + # lidar to image transform + lidar2image = camera_intrinsics @ lidar2camera_rt.T + input_dict["lidar2image"].append(lidar2image) + + # camera to ego transform + camera2ego = np.eye(4).astype(np.float32) + camera2ego[:3, :3] = Quaternion( + camera_info["sensor2ego_rotation"] + ).rotation_matrix + camera2ego[:3, 3] = camera_info["sensor2ego_translation"] + input_dict["camera2ego"].append(camera2ego) + + # camera to lidar transform + camera2lidar = np.eye(4).astype(np.float32) + camera2lidar[:3, :3] = camera_info["sensor2lidar_rotation"] + camera2lidar[:3, 3] = camera_info["sensor2lidar_translation"] + input_dict["camera2lidar"].append(camera2lidar) + # read image + filename = input_dict["image_paths"] + images = [] + for name in filename: + images.append(Image.open(str(self.root_path / name))) + + input_dict["camera_imgs"] = images + input_dict["ori_shape"] = images[0].size + + # resize and crop image + input_dict = self.crop_image(input_dict) + + return input_dict + + def __len__(self): + if self._merge_all_iters_to_one_epoch: + return len(self.infos) * self.total_epochs + + return len(self.infos) + + def __getitem__(self, index): + if self._merge_all_iters_to_one_epoch: + index = index % len(self.infos) + + info = copy.deepcopy(self.infos[index]) + points = self.get_lidar_with_sweeps(index, max_sweeps=self.dataset_cfg.MAX_SWEEPS) + + input_dict = { + 'points': points, + 'frame_id': Path(info['lidar_path']).stem, + 'metadata': {'token': info['token']} + } + + if 'gt_boxes' in info: + if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False): + mask = (info['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1) + else: + mask = None + + input_dict.update({ + 'gt_names': info['gt_names'] if mask is None else info['gt_names'][mask], + 'gt_boxes': info['gt_boxes'] if mask is None else info['gt_boxes'][mask] + }) + if self.use_camera: + input_dict = self.load_camera_info(input_dict, info) + + data_dict = self.prepare_data(data_dict=input_dict) + + if self.dataset_cfg.get('SET_NAN_VELOCITY_TO_ZEROS', False) and 'gt_boxes' in info: + gt_boxes = data_dict['gt_boxes'] + gt_boxes[np.isnan(gt_boxes)] = 0 + data_dict['gt_boxes'] 
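Projecting a point with the 4 x 4 lidar2image computed above (a toy sketch with a hand-written pinhole intrinsic; the real matrix additionally folds in lidar2camera_rt, which is taken as identity here):

import numpy as np

camera_intrinsics = np.eye(4)
camera_intrinsics[:3, :3] = np.array([[500., 0., 320.],
                                      [0., 500., 240.],
                                      [0., 0., 1.]])
lidar2image = camera_intrinsics        # identity extrinsics for this sketch
pt = np.array([0., 0., 10., 1.])       # homogeneous point 10 m along the optical axis
u, v, depth, _ = lidar2image @ pt
print(u / depth, v / depth, depth)     # 320.0 240.0 10.0 -> the principal point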
= gt_boxes + + if not self.dataset_cfg.PRED_VELOCITY and 'gt_boxes' in data_dict: + data_dict['gt_boxes'] = data_dict['gt_boxes'][:, [0, 1, 2, 3, 4, 5, 6, -1]] + + return data_dict + + def evaluation(self, det_annos, class_names, **kwargs): + import json + from nuscenes.nuscenes import NuScenes + from . import nuscenes_utils + nusc = NuScenes(version=self.dataset_cfg.VERSION, dataroot=str(self.root_path), verbose=True) + nusc_annos = nuscenes_utils.transform_det_annos_to_nusc_annos(det_annos, nusc) + nusc_annos['meta'] = { + 'use_camera': False, + 'use_lidar': True, + 'use_radar': False, + 'use_map': False, + 'use_external': False, + } + + output_path = Path(kwargs['output_path']) + output_path.mkdir(exist_ok=True, parents=True) + res_path = str(output_path / 'results_nusc.json') + with open(res_path, 'w') as f: + json.dump(nusc_annos, f) + + self.logger.info(f'The predictions of NuScenes have been saved to {res_path}') + + if self.dataset_cfg.VERSION == 'v1.0-test': + return 'No ground-truth annotations for evaluation', {} + + from nuscenes.eval.detection.config import config_factory + from nuscenes.eval.detection.evaluate import NuScenesEval + + eval_set_map = { + 'v1.0-mini': 'mini_val', + 'v1.0-trainval': 'val', + 'v1.0-test': 'test' + } + try: + eval_version = 'detection_cvpr_2019' + eval_config = config_factory(eval_version) + except: + eval_version = 'cvpr_2019' + eval_config = config_factory(eval_version) + + nusc_eval = NuScenesEval( + nusc, + config=eval_config, + result_path=res_path, + eval_set=eval_set_map[self.dataset_cfg.VERSION], + output_dir=str(output_path), + verbose=True, + ) + metrics_summary = nusc_eval.main(plot_examples=0, render_curves=False) + + with open(output_path / 'metrics_summary.json', 'r') as f: + metrics = json.load(f) + + result_str, result_dict = nuscenes_utils.format_nuscene_results(metrics, self.class_names, version=eval_version) + return result_str, result_dict + + def create_groundtruth_database(self, used_classes=None, max_sweeps=10): + import torch + + database_save_path = self.root_path / f'gt_database_{max_sweeps}sweeps_withvelo' + db_info_save_path = self.root_path / f'nuscenes_dbinfos_{max_sweeps}sweeps_withvelo.pkl' + + database_save_path.mkdir(parents=True, exist_ok=True) + all_db_infos = {} + + for idx in tqdm(range(len(self.infos))): + sample_idx = idx + info = self.infos[idx] + points = self.get_lidar_with_sweeps(idx, max_sweeps=max_sweeps) + gt_boxes = info['gt_boxes'] + gt_names = info['gt_names'] + + box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu( + torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), + torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda() + ).long().squeeze(dim=0).cpu().numpy() + + for i in range(gt_boxes.shape[0]): + filename = '%s_%s_%d.bin' % (sample_idx, gt_names[i], i) + filepath = database_save_path / filename + gt_points = points[box_idxs_of_pts == i] + + gt_points[:, :3] -= gt_boxes[i, :3] + with open(filepath, 'w') as f: + gt_points.tofile(f) + + if (used_classes is None) or gt_names[i] in used_classes: + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': gt_names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i, + 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]} + if gt_names[i] in all_db_infos: + all_db_infos[gt_names[i]].append(db_info) + else: + all_db_infos[gt_names[i]] = [db_info] + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 
'wb') as f: + pickle.dump(all_db_infos, f) + + +def create_nuscenes_info(version, data_path, save_path, max_sweeps=10, with_cam=False): + from nuscenes.nuscenes import NuScenes + from nuscenes.utils import splits + from . import nuscenes_utils + data_path = data_path / version + save_path = save_path / version + + assert version in ['v1.0-trainval', 'v1.0-test', 'v1.0-mini'] + if version == 'v1.0-trainval': + train_scenes = splits.train + val_scenes = splits.val + elif version == 'v1.0-test': + train_scenes = splits.test + val_scenes = [] + elif version == 'v1.0-mini': + train_scenes = splits.mini_train + val_scenes = splits.mini_val + else: + raise NotImplementedError + + nusc = NuScenes(version=version, dataroot=data_path, verbose=True) + available_scenes = nuscenes_utils.get_available_scenes(nusc) + available_scene_names = [s['name'] for s in available_scenes] + train_scenes = list(filter(lambda x: x in available_scene_names, train_scenes)) + val_scenes = list(filter(lambda x: x in available_scene_names, val_scenes)) + train_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in train_scenes]) + val_scenes = set([available_scenes[available_scene_names.index(s)]['token'] for s in val_scenes]) + + print('%s: train scene(%d), val scene(%d)' % (version, len(train_scenes), len(val_scenes))) + + train_nusc_infos, val_nusc_infos = nuscenes_utils.fill_trainval_infos( + data_path=data_path, nusc=nusc, train_scenes=train_scenes, val_scenes=val_scenes, + test='test' in version, max_sweeps=max_sweeps, with_cam=with_cam + ) + + if version == 'v1.0-test': + print('test sample: %d' % len(train_nusc_infos)) + with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_test.pkl', 'wb') as f: + pickle.dump(train_nusc_infos, f) + else: + print('train sample: %d, val sample: %d' % (len(train_nusc_infos), len(val_nusc_infos))) + with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_train.pkl', 'wb') as f: + pickle.dump(train_nusc_infos, f) + with open(save_path / f'nuscenes_infos_{max_sweeps}sweeps_val.pkl', 'wb') as f: + pickle.dump(val_nusc_infos, f) + + +if __name__ == '__main__': + import yaml + import argparse + from pathlib import Path + from easydict import EasyDict + + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset') + parser.add_argument('--func', type=str, default='create_nuscenes_infos', help='') + parser.add_argument('--version', type=str, default='v1.0-trainval', help='') + parser.add_argument('--with_cam', action='store_true', default=False, help='use camera or not') + args = parser.parse_args() + + if args.func == 'create_nuscenes_infos': + dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file))) + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + dataset_cfg.VERSION = args.version + create_nuscenes_info( + version=dataset_cfg.VERSION, + data_path=ROOT_DIR / 'data' / 'nuscenes', + save_path=ROOT_DIR / 'data' / 'nuscenes', + max_sweeps=dataset_cfg.MAX_SWEEPS, + with_cam=args.with_cam + ) + + nuscenes_dataset = NuScenesDataset( + dataset_cfg=dataset_cfg, class_names=None, + root_path=ROOT_DIR / 'data' / 'nuscenes', + logger=common_utils.create_logger(), training=True + ) + nuscenes_dataset.create_groundtruth_database(max_sweeps=dataset_cfg.MAX_SWEEPS) diff --git a/toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_utils.py b/toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_utils.py new file mode 100644 index 000000000..4e2421d15 --- 
/dev/null +++ b/toolbox/openpcdet/pcdet/datasets/nuscenes/nuscenes_utils.py @@ -0,0 +1,588 @@ +""" +The NuScenes data pre-processing and evaluation is modified from +https://github.com/traveller59/second.pytorch and https://github.com/poodarchu/Det3D +""" + +import operator +from functools import reduce +from pathlib import Path + +import numpy as np +import tqdm +from nuscenes.utils.data_classes import Box +from nuscenes.utils.geometry_utils import transform_matrix +from pyquaternion import Quaternion + +map_name_from_general_to_detection = { + 'human.pedestrian.adult': 'pedestrian', + 'human.pedestrian.child': 'pedestrian', + 'human.pedestrian.wheelchair': 'ignore', + 'human.pedestrian.stroller': 'ignore', + 'human.pedestrian.personal_mobility': 'ignore', + 'human.pedestrian.police_officer': 'pedestrian', + 'human.pedestrian.construction_worker': 'pedestrian', + 'animal': 'ignore', + 'vehicle.car': 'car', + 'vehicle.motorcycle': 'motorcycle', + 'vehicle.bicycle': 'bicycle', + 'vehicle.bus.bendy': 'bus', + 'vehicle.bus.rigid': 'bus', + 'vehicle.truck': 'truck', + 'vehicle.construction': 'construction_vehicle', + 'vehicle.emergency.ambulance': 'ignore', + 'vehicle.emergency.police': 'ignore', + 'vehicle.trailer': 'trailer', + 'movable_object.barrier': 'barrier', + 'movable_object.trafficcone': 'traffic_cone', + 'movable_object.pushable_pullable': 'ignore', + 'movable_object.debris': 'ignore', + 'static_object.bicycle_rack': 'ignore', +} + + +cls_attr_dist = { + 'barrier': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 0, + 'vehicle.parked': 0, + 'vehicle.stopped': 0, + }, + 'bicycle': { + 'cycle.with_rider': 2791, + 'cycle.without_rider': 8946, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 0, + 'vehicle.parked': 0, + 'vehicle.stopped': 0, + }, + 'bus': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 9092, + 'vehicle.parked': 3294, + 'vehicle.stopped': 3881, + }, + 'car': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 114304, + 'vehicle.parked': 330133, + 'vehicle.stopped': 46898, + }, + 'construction_vehicle': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 882, + 'vehicle.parked': 11549, + 'vehicle.stopped': 2102, + }, + 'ignore': { + 'cycle.with_rider': 307, + 'cycle.without_rider': 73, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 165, + 'vehicle.parked': 400, + 'vehicle.stopped': 102, + }, + 'motorcycle': { + 'cycle.with_rider': 4233, + 'cycle.without_rider': 8326, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 0, + 'vehicle.parked': 0, + 'vehicle.stopped': 0, + }, + 'pedestrian': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 157444, + 'pedestrian.sitting_lying_down': 13939, + 'pedestrian.standing': 46530, + 'vehicle.moving': 0, + 'vehicle.parked': 0, + 'vehicle.stopped': 0, + }, + 'traffic_cone': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 
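# (For reference: downstream nuScenes result formatting typically uses this
# table to pick a default attribute per predicted class, e.g.
# max(cls_attr_dist['car'].items(), key=operator.itemgetter(1))[0]
# -> 'vehicle.parked'; `operator` is imported above for that purpose.)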
'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 0, + 'vehicle.parked': 0, + 'vehicle.stopped': 0, + }, + 'trailer': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 3421, + 'vehicle.parked': 19224, + 'vehicle.stopped': 1895, + }, + 'truck': { + 'cycle.with_rider': 0, + 'cycle.without_rider': 0, + 'pedestrian.moving': 0, + 'pedestrian.sitting_lying_down': 0, + 'pedestrian.standing': 0, + 'vehicle.moving': 21339, + 'vehicle.parked': 55626, + 'vehicle.stopped': 11097, + }, +} + + +def get_available_scenes(nusc): + available_scenes = [] + print('total scene num:', len(nusc.scene)) + for scene in nusc.scene: + scene_token = scene['token'] + scene_rec = nusc.get('scene', scene_token) + sample_rec = nusc.get('sample', scene_rec['first_sample_token']) + sd_rec = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP']) + has_more_frames = True + scene_not_exist = False + while has_more_frames: + lidar_path, boxes, _ = nusc.get_sample_data(sd_rec['token']) + if not Path(lidar_path).exists(): + scene_not_exist = True + break + else: + break + # if not sd_rec['next'] == '': + # sd_rec = nusc.get('sample_data', sd_rec['next']) + # else: + # has_more_frames = False + if scene_not_exist: + continue + available_scenes.append(scene) + print('exist scene num:', len(available_scenes)) + return available_scenes + + +def get_sample_data(nusc, sample_data_token, selected_anntokens=None): + """ + Returns the data path as well as all annotations related to that sample_data. + Note that the boxes are transformed into the current sensor's coordinate frame. + Args: + nusc: + sample_data_token: Sample_data token. + selected_anntokens: If provided only return the selected annotation. + + Returns: + + """ + # Retrieve sensor & pose records + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + + if sensor_record['modality'] == 'camera': + cam_intrinsic = np.array(cs_record['camera_intrinsic']) + imsize = (sd_record['width'], sd_record['height']) + else: + cam_intrinsic = imsize = None + + # Retrieve all sample annotations and map to sensor coordinate system. + if selected_anntokens is not None: + boxes = list(map(nusc.get_box, selected_anntokens)) + else: + boxes = nusc.get_boxes(sample_data_token) + + # Make list of Box objects including coord system transforms. + box_list = [] + for box in boxes: + box.velocity = nusc.box_velocity(box.token) + # Move box to ego vehicle coord system + box.translate(-np.array(pose_record['translation'])) + box.rotate(Quaternion(pose_record['rotation']).inverse) + + # Move box to sensor coord system + box.translate(-np.array(cs_record['translation'])) + box.rotate(Quaternion(cs_record['rotation']).inverse) + + box_list.append(box) + + return data_path, box_list, cam_intrinsic + + +def quaternion_yaw(q: Quaternion) -> float: + """ + Calculate the yaw angle from a quaternion. + Note that this only works for a quaternion that represents a box in lidar or global coordinate frame. + It does not work for a box in the camera frame. + :param q: Quaternion of interest. + :return: Yaw angle in radians. + """ + + # Project into xy plane. 
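A small worked example of how the two lookup tables defined earlier in this file are consumed (values read straight off the tables; this snippet is illustrative, not part of the patch): raw nuScenes category names collapse to detection names, and cls_attr_dist supplies each class's most frequent attribute as a fallback.

import operator

names = ['vehicle.car', 'human.pedestrian.adult', 'animal']
print([map_name_from_general_to_detection[n] for n in names])
# -> ['car', 'pedestrian', 'ignore']; 'ignore' rows are masked out of gt_names downstream

# the fallback used in transform_det_annos_to_nusc_annos when no attribute rule fires
print(max(cls_attr_dist['car'].items(), key=operator.itemgetter(1))[0])
# -> 'vehicle.parked' (330133 occurrences in the table above)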
+ v = np.dot(q.rotation_matrix, np.array([1, 0, 0])) + + # Measure yaw using arctan. + yaw = np.arctan2(v[1], v[0]) + + return yaw + + +def obtain_sensor2top( + nusc, sensor_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, sensor_type="lidar" +): + """Obtain the info with RT matrix from general sensor to Top LiDAR. + + Args: + nusc (class): Dataset class in the nuScenes dataset. + sensor_token (str): Sample data token corresponding to the + specific sensor type. + l2e_t (np.ndarray): Translation from lidar to ego in shape (1, 3). + l2e_r_mat (np.ndarray): Rotation matrix from lidar to ego + in shape (3, 3). + e2g_t (np.ndarray): Translation from ego to global in shape (1, 3). + e2g_r_mat (np.ndarray): Rotation matrix from ego to global + in shape (3, 3). + sensor_type (str): Sensor to calibrate. Default: 'lidar'. + + Returns: + sweep (dict): Sweep information after transformation. + """ + sd_rec = nusc.get("sample_data", sensor_token) + cs_record = nusc.get("calibrated_sensor", sd_rec["calibrated_sensor_token"]) + pose_record = nusc.get("ego_pose", sd_rec["ego_pose_token"]) + data_path = str(nusc.get_sample_data_path(sd_rec["token"])) + # if os.getcwd() in data_path: # path from lyftdataset is absolute path + # data_path = data_path.split(f"{os.getcwd()}/")[-1] # relative path + sweep = { + "data_path": data_path, + "type": sensor_type, + "sample_data_token": sd_rec["token"], + "sensor2ego_translation": cs_record["translation"], + "sensor2ego_rotation": cs_record["rotation"], + "ego2global_translation": pose_record["translation"], + "ego2global_rotation": pose_record["rotation"], + "timestamp": sd_rec["timestamp"], + } + l2e_r_s = sweep["sensor2ego_rotation"] + l2e_t_s = sweep["sensor2ego_translation"] + e2g_r_s = sweep["ego2global_rotation"] + e2g_t_s = sweep["ego2global_translation"] + + # obtain the RT from sensor to Top LiDAR + # sweep->ego->global->ego'->lidar + l2e_r_s_mat = Quaternion(l2e_r_s).rotation_matrix + e2g_r_s_mat = Quaternion(e2g_r_s).rotation_matrix + R = (l2e_r_s_mat.T @ e2g_r_s_mat.T) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + T = (l2e_t_s @ e2g_r_s_mat.T + e2g_t_s) @ ( + np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T + ) + T -= ( + e2g_t @ (np.linalg.inv(e2g_r_mat).T @ np.linalg.inv(l2e_r_mat).T) + + l2e_t @ np.linalg.inv(l2e_r_mat).T + ).squeeze(0) + sweep["sensor2lidar_rotation"] = R.T # points @ R.T + T + sweep["sensor2lidar_translation"] = T + return sweep + + +def fill_trainval_infos(data_path, nusc, train_scenes, val_scenes, test=False, max_sweeps=10, with_cam=False): + train_nusc_infos = [] + val_nusc_infos = [] + progress_bar = tqdm.tqdm(total=len(nusc.sample), desc='create_info', dynamic_ncols=True) + + ref_chan = 'LIDAR_TOP' # The reference channel of the current sample that the point clouds are mapped to. + chan = 'LIDAR_TOP' # The lidar channel from which we track back n sweeps to aggregate the point cloud.
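The R/T composition in obtain_sensor2top is easier to audit in homogeneous form. A minimal sanity-check sketch (not part of the patch; the calibration values are invented) showing that the sensor-to-lidar transform equals composing sensor->ego(sweep)->global->ego(ref)->lidar(ref):

import numpy as np
from pyquaternion import Quaternion

def make_tf(q, t):
    # 4x4 homogeneous transform from a nuScenes-style quaternion + translation
    tf = np.eye(4)
    tf[:3, :3] = Quaternion(q).rotation_matrix
    tf[:3, 3] = t
    return tf

yaw90 = Quaternion(axis=[0, 0, 1], radians=np.pi / 2)
l2e_s = make_tf([1, 0, 0, 0], [0.9, 0.0, 1.8])     # sensor -> ego, sweep frame
e2g_s = make_tf(yaw90.elements, [10.0, 5.0, 0.0])  # ego -> global, sweep frame
l2e_r = make_tf([1, 0, 0, 0], [0.9, 0.0, 1.8])     # lidar -> ego, reference frame
e2g_r = make_tf(yaw90.elements, [12.0, 5.0, 0.0])  # ego -> global, reference frame

sensor2lidar = np.linalg.inv(l2e_r) @ np.linalg.inv(e2g_r) @ e2g_s @ l2e_s
# points then map as pts_ref = pts_sensor @ sensor2lidar[:3, :3].T + sensor2lidar[:3, 3],
# i.e. the R.T / T pair stored in sweep['sensor2lidar_rotation'] / ['sensor2lidar_translation']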
+ + for index, sample in enumerate(nusc.sample): + progress_bar.update() + + ref_sd_token = sample['data'][ref_chan] + ref_sd_rec = nusc.get('sample_data', ref_sd_token) + ref_cs_rec = nusc.get('calibrated_sensor', ref_sd_rec['calibrated_sensor_token']) + ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token']) + ref_time = 1e-6 * ref_sd_rec['timestamp'] + + ref_lidar_path, ref_boxes, _ = get_sample_data(nusc, ref_sd_token) + + ref_cam_front_token = sample['data']['CAM_FRONT'] + ref_cam_path, _, ref_cam_intrinsic = nusc.get_sample_data(ref_cam_front_token) + + # Homogeneous transform from ego car frame to reference frame + ref_from_car = transform_matrix( + ref_cs_rec['translation'], Quaternion(ref_cs_rec['rotation']), inverse=True + ) + + # Homogeneous transformation matrix from global to _current_ ego car frame + car_from_global = transform_matrix( + ref_pose_rec['translation'], Quaternion(ref_pose_rec['rotation']), inverse=True, + ) + + info = { + 'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(), + 'cam_front_path': Path(ref_cam_path).relative_to(data_path).__str__(), + 'cam_intrinsic': ref_cam_intrinsic, + 'token': sample['token'], + 'sweeps': [], + 'ref_from_car': ref_from_car, + 'car_from_global': car_from_global, + 'timestamp': ref_time, + } + if with_cam: + info['cams'] = dict() + l2e_r = ref_cs_rec["rotation"] + l2e_t = np.array(ref_cs_rec["translation"]).reshape(1, 3) # (1, 3), the shape obtain_sensor2top documents + e2g_r = ref_pose_rec["rotation"] + e2g_t = np.array(ref_pose_rec["translation"]).reshape(1, 3) + l2e_r_mat = Quaternion(l2e_r).rotation_matrix + e2g_r_mat = Quaternion(e2g_r).rotation_matrix + + # obtain the 6 cameras' information per frame + camera_types = [ + "CAM_FRONT", + "CAM_FRONT_RIGHT", + "CAM_FRONT_LEFT", + "CAM_BACK", + "CAM_BACK_LEFT", + "CAM_BACK_RIGHT", + ] + for cam in camera_types: + cam_token = sample["data"][cam] + cam_path, _, camera_intrinsics = nusc.get_sample_data(cam_token) + cam_info = obtain_sensor2top( + nusc, cam_token, l2e_t, l2e_r_mat, e2g_t, e2g_r_mat, cam + ) + cam_info['data_path'] = Path(cam_info['data_path']).relative_to(data_path).__str__() + cam_info.update(camera_intrinsics=camera_intrinsics) + info["cams"].update({cam: cam_info}) + + + sample_data_token = sample['data'][chan] + curr_sd_rec = nusc.get('sample_data', sample_data_token) + sweeps = [] + while len(sweeps) < max_sweeps - 1: + if curr_sd_rec['prev'] == '': + if len(sweeps) == 0: + sweep = { + 'lidar_path': Path(ref_lidar_path).relative_to(data_path).__str__(), + 'sample_data_token': curr_sd_rec['token'], + 'transform_matrix': None, + 'time_lag': curr_sd_rec['timestamp'] * 0, + } + sweeps.append(sweep) + else: + sweeps.append(sweeps[-1]) + else: + curr_sd_rec = nusc.get('sample_data', curr_sd_rec['prev']) + + # Get past pose + current_pose_rec = nusc.get('ego_pose', curr_sd_rec['ego_pose_token']) + global_from_car = transform_matrix( + current_pose_rec['translation'], Quaternion(current_pose_rec['rotation']), inverse=False, + ) + + # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
+ current_cs_rec = nusc.get( + 'calibrated_sensor', curr_sd_rec['calibrated_sensor_token'] + ) + car_from_current = transform_matrix( + current_cs_rec['translation'], Quaternion(current_cs_rec['rotation']), inverse=False, + ) + + tm = reduce(np.dot, [ref_from_car, car_from_global, global_from_car, car_from_current]) + + lidar_path = nusc.get_sample_data_path(curr_sd_rec['token']) + + time_lag = ref_time - 1e-6 * curr_sd_rec['timestamp'] + + sweep = { + 'lidar_path': Path(lidar_path).relative_to(data_path).__str__(), + 'sample_data_token': curr_sd_rec['token'], + 'transform_matrix': tm, + 'global_from_car': global_from_car, + 'car_from_current': car_from_current, + 'time_lag': time_lag, + } + sweeps.append(sweep) + + info['sweeps'] = sweeps + + assert len(info['sweeps']) == max_sweeps - 1, \ + f"sweep {curr_sd_rec['token']} only has {len(info['sweeps'])} sweeps, " \ + f"you should duplicate to sweep num {max_sweeps - 1}" + + if not test: + annotations = [nusc.get('sample_annotation', token) for token in sample['anns']] + + # the filtering gives 0.5~1 map improvement + num_lidar_pts = np.array([anno['num_lidar_pts'] for anno in annotations]) + num_radar_pts = np.array([anno['num_radar_pts'] for anno in annotations]) + mask = (num_lidar_pts + num_radar_pts > 0) + + locs = np.array([b.center for b in ref_boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in ref_boxes]).reshape(-1, 3)[:, [1, 0, 2]] # wlh == > dxdydz (lwh) + velocity = np.array([b.velocity for b in ref_boxes]).reshape(-1, 3) + rots = np.array([quaternion_yaw(b.orientation) for b in ref_boxes]).reshape(-1, 1) + names = np.array([b.name for b in ref_boxes]) + tokens = np.array([b.token for b in ref_boxes]) + gt_boxes = np.concatenate([locs, dims, rots, velocity[:, :2]], axis=1) + + assert len(annotations) == len(gt_boxes) == len(velocity) + + info['gt_boxes'] = gt_boxes[mask, :] + info['gt_boxes_velocity'] = velocity[mask, :] + info['gt_names'] = np.array([map_name_from_general_to_detection[name] for name in names])[mask] + info['gt_boxes_token'] = tokens[mask] + info['num_lidar_pts'] = num_lidar_pts[mask] + info['num_radar_pts'] = num_radar_pts[mask] + + if sample['scene_token'] in train_scenes: + train_nusc_infos.append(info) + else: + val_nusc_infos.append(info) + + progress_bar.close() + return train_nusc_infos, val_nusc_infos + + +def boxes_lidar_to_nusenes(det_info): + boxes3d = det_info['boxes_lidar'] + scores = det_info['score'] + labels = det_info['pred_labels'] + + box_list = [] + for k in range(boxes3d.shape[0]): + quat = Quaternion(axis=[0, 0, 1], radians=boxes3d[k, 6]) + velocity = (*boxes3d[k, 7:9], 0.0) if boxes3d.shape[1] == 9 else (0.0, 0.0, 0.0) + box = Box( + boxes3d[k, :3], + boxes3d[k, [4, 3, 5]], # wlh + quat, label=labels[k], score=scores[k], velocity=velocity, + ) + box_list.append(box) + return box_list + + +def lidar_nusc_box_to_global(nusc, boxes, sample_token): + s_record = nusc.get('sample', sample_token) + sample_data_token = s_record['data']['LIDAR_TOP'] + + sd_record = nusc.get('sample_data', sample_data_token) + cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token']) + sensor_record = nusc.get('sensor', cs_record['sensor_token']) + pose_record = nusc.get('ego_pose', sd_record['ego_pose_token']) + + data_path = nusc.get_sample_data_path(sample_data_token) + box_list = [] + for box in boxes: + # Move box to ego vehicle coord system + box.rotate(Quaternion(cs_record['rotation'])) + box.translate(np.array(cs_record['translation'])) + # Move box to global coord system + 
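A hedged consumer-side sketch (the function and variable names here are assumptions for illustration) of how a sweep record built above is typically used: load the sweep's points and map them into the reference frame with the stored 4x4 transform_matrix.

import numpy as np

def load_sweep_to_ref(data_path, sweep):
    # nuScenes lidar .bin files hold float32 rows of (x, y, z, intensity, ring_index)
    points = np.fromfile(str(data_path / sweep['lidar_path']), dtype=np.float32).reshape(-1, 5)[:, :4]
    tm = sweep['transform_matrix']  # ref_from_car @ car_from_global @ global_from_car @ car_from_current
    if tm is not None:
        ones = np.ones((points.shape[0], 1), dtype=np.float32)
        points[:, :3] = (np.hstack([points[:, :3], ones]) @ tm.T)[:, :3]
    return points, sweep['time_lag']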
box.rotate(Quaternion(pose_record['rotation'])) + box.translate(np.array(pose_record['translation'])) + box_list.append(box) + return box_list + + +def transform_det_annos_to_nusc_annos(det_annos, nusc): + nusc_annos = { + 'results': {}, + 'meta': None, + } + + for det in det_annos: + annos = [] + box_list = boxes_lidar_to_nusenes(det) + box_list = lidar_nusc_box_to_global( + nusc=nusc, boxes=box_list, sample_token=det['metadata']['token'] + ) + + for k, box in enumerate(box_list): + name = det['name'][k] + if np.sqrt(box.velocity[0] ** 2 + box.velocity[1] ** 2) > 0.2: + if name in ['car', 'construction_vehicle', 'bus', 'truck', 'trailer']: + attr = 'vehicle.moving' + elif name in ['bicycle', 'motorcycle']: + attr = 'cycle.with_rider' + else: + attr = None + else: + if name in ['pedestrian']: + attr = 'pedestrian.standing' + elif name in ['bus']: + attr = 'vehicle.stopped' + else: + attr = None + attr = attr if attr is not None else max( + cls_attr_dist[name].items(), key=operator.itemgetter(1))[0] + nusc_anno = { + 'sample_token': det['metadata']['token'], + 'translation': box.center.tolist(), + 'size': box.wlh.tolist(), + 'rotation': box.orientation.elements.tolist(), + 'velocity': box.velocity[:2].tolist(), + 'detection_name': name, + 'detection_score': box.score, + 'attribute_name': attr + } + annos.append(nusc_anno) + + nusc_annos['results'].update({det["metadata"]["token"]: annos}) + + return nusc_annos + + +def format_nuscene_results(metrics, class_names, version='default'): + result = '----------------Nuscene %s results-----------------\n' % version + for name in class_names: + threshs = ', '.join(list(metrics['label_aps'][name].keys())) + ap_list = list(metrics['label_aps'][name].values()) + + err_name =', '.join([x.split('_')[0] for x in list(metrics['label_tp_errors'][name].keys())]) + error_list = list(metrics['label_tp_errors'][name].values()) + + result += f'***{name} error@{err_name} | AP@{threshs}\n' + result += ', '.join(['%.2f' % x for x in error_list]) + ' | ' + result += ', '.join(['%.2f' % (x * 100) for x in ap_list]) + result += f" | mean AP: {metrics['mean_dist_aps'][name]}" + result += '\n' + + result += '--------------average performance-------------\n' + details = {} + for key, val in metrics['tp_errors'].items(): + result += '%s:\t %.4f\n' % (key, val) + details[key] = val + + result += 'mAP:\t %.4f\n' % metrics['mean_ap'] + result += 'NDS:\t %.4f\n' % metrics['nd_score'] + + details.update({ + 'mAP': metrics['mean_ap'], + 'NDS': metrics['nd_score'], + }) + + return result, details diff --git a/toolbox/openpcdet/pcdet/datasets/once/__init__.py b/toolbox/openpcdet/pcdet/datasets/once/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/once/once_dataset.py b/toolbox/openpcdet/pcdet/datasets/once/once_dataset.py new file mode 100644 index 000000000..0ff079843 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/once/once_dataset.py @@ -0,0 +1,444 @@ +import copy +import pickle +import numpy as np + +from PIL import Image +import torch +import torch.nn.functional as F +from pathlib import Path + +from ..dataset import DatasetTemplate +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import box_utils +from .once_toolkits import Octopus + +class ONCEDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + 
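Returning to the nuScenes helpers above: a brief sketch (assuming the standard nuScenes submission format) of how the dict from transform_det_annos_to_nusc_annos is completed and serialized before it is handed to the official evaluator.

import json

nusc_annos = transform_det_annos_to_nusc_annos(det_annos, nusc)
nusc_annos['meta'] = {
    'use_camera': False, 'use_lidar': True, 'use_radar': False,
    'use_map': False, 'use_external': False,
}
with open('results_nusc.json', 'w') as f:
    json.dump(nusc_annos, f)
# this json is the input format the nuscenes-devkit evaluator consumes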
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.split = dataset_cfg.DATA_SPLIT['train'] if training else dataset_cfg.DATA_SPLIT['test'] + assert self.split in ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large'] + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None + self.cam_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09'] + self.cam_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back'] + self.toolkits = Octopus(self.root_path) + + self.once_infos = [] + self.include_once_data(self.split) + + def include_once_data(self, split): + if self.logger is not None: + self.logger.info('Loading ONCE dataset') + once_infos = [] + + for info_path in self.dataset_cfg.INFO_PATH[split]: + info_path = self.root_path / info_path + if not info_path.exists(): + continue + with open(info_path, 'rb') as f: + infos = pickle.load(f) + once_infos.extend(infos) + + def check_annos(info): + return 'annos' in info + + if self.split != 'raw': + once_infos = list(filter(check_annos,once_infos)) + + self.once_infos.extend(once_infos) + + if self.logger is not None: + self.logger.info('Total samples for ONCE dataset: %d' % (len(once_infos))) + + def set_split(self, split): + super().__init__( + dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, root_path=self.root_path, logger=self.logger + ) + self.split = split + + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_seq_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None + + def get_lidar(self, sequence_id, frame_id): + return self.toolkits.load_point_cloud(sequence_id, frame_id) + + def get_image(self, sequence_id, frame_id, cam_name): + return self.toolkits.load_image(sequence_id, frame_id, cam_name) + + def project_lidar_to_image(self, sequence_id, frame_id): + return self.toolkits.project_lidar_to_image(sequence_id, frame_id) + + def point_painting(self, points, info): + semseg_dir = './' # add your own seg directory + used_classes = [0,1,2,3,4,5] + num_classes = len(used_classes) + frame_id = str(info['frame_id']) + seq_id = str(info['sequence_id']) + painted = np.zeros((points.shape[0], num_classes)) # classes + bg + for cam_name in self.cam_names: + img_path = Path(semseg_dir) / Path(seq_id) / Path(cam_name) / Path(frame_id+'_label.png') + calib_info = info['calib'][cam_name] + cam_2_velo = calib_info['cam_to_velo'] + cam_intri = np.hstack([calib_info['cam_intrinsic'], np.zeros((3, 1), dtype=np.float32)]) + point_xyz = points[:, :3] + points_homo = np.hstack( + [point_xyz, np.ones(point_xyz.shape[0], dtype=np.float32).reshape((-1, 1))]) + points_lidar = np.dot(points_homo, np.linalg.inv(cam_2_velo).T) + mask = points_lidar[:, 2] > 0 + points_lidar = points_lidar[mask] + points_img = np.dot(points_lidar, cam_intri.T) + points_img = points_img / points_img[:, [2]] + uv = points_img[:, [0,1]] + #depth = points_img[:, [2]] + seg_map = np.array(Image.open(img_path)) # (H, W) + H, W = seg_map.shape + seg_feats = np.zeros((H*W, num_classes)) + seg_map = seg_map.reshape(-1) + for cls_i in used_classes: + seg_feats[seg_map==cls_i, cls_i] = 1 + seg_feats = seg_feats.reshape(H, W, num_classes).transpose(2, 0, 1) + uv[:, 0] = (uv[:, 0] - W / 2) / (W / 2) + uv[:, 1] = (uv[:, 1] - H / 2) / (H / 2) + uv_tensor = 
torch.from_numpy(uv).unsqueeze(0).unsqueeze(0) # [1,1,N,2] + seg_feats = torch.from_numpy(seg_feats).unsqueeze(0) # [1,C,H,W] + proj_scores = F.grid_sample(seg_feats, uv_tensor, mode='bilinear', padding_mode='zeros') # [1, C, 1, N] + proj_scores = proj_scores.squeeze(0).squeeze(1).transpose(0, 1).contiguous() # [N, C] + painted[mask] = proj_scores.numpy() + return np.concatenate([points, painted], axis=1) + + def __len__(self): + if self._merge_all_iters_to_one_epoch: + return len(self.once_infos) * self.total_epochs + + return len(self.once_infos) + + def __getitem__(self, index): + if self._merge_all_iters_to_one_epoch: + index = index % len(self.once_infos) + + info = copy.deepcopy(self.once_infos[index]) + frame_id = info['frame_id'] + seq_id = info['sequence_id'] + points = self.get_lidar(seq_id, frame_id) + + if self.dataset_cfg.get('POINT_PAINTING', False): + points = self.point_painting(points, info) + + input_dict = { + 'points': points, + 'frame_id': frame_id, + } + + if 'annos' in info: + annos = info['annos'] + input_dict.update({ + 'gt_names': annos['name'], + 'gt_boxes': annos['boxes_3d'], + 'num_points_in_gt': annos.get('num_points_in_gt', None) + }) + + data_dict = self.prepare_data(data_dict=input_dict) + data_dict.pop('num_points_in_gt', None) + return data_dict + + def get_infos(self, num_workers=4, sample_seq_list=None): + import concurrent.futures as futures + import json + root_path = self.root_path + cam_names = self.cam_names + + """ + # dataset json format + { + 'meta_info': + 'calib': { + 'cam01': { + 'cam_to_velo': list + 'cam_intrinsic': list + 'distortion': list + } + ... + } + 'frames': [ + { + 'frame_id': timestamp, + 'annos': { + 'names': list + 'boxes_3d': list of list + 'boxes_2d': { + 'cam01': list of list + ... + } + } + 'pose': list + }, + ... + ] + } + # open pcdet format + { + 'meta_info': + 'sequence_id': seq_idx + 'frame_id': timestamp + 'timestamp': timestamp + 'lidar': path + 'cam01': path + ... + 'calib': { + 'cam01': { + 'cam_to_velo': np.array + 'cam_intrinsic': np.array + 'distortion': np.array + } + ... + } + 'pose': np.array + 'annos': { + 'name': np.array + 'boxes_3d': np.array + 'boxes_2d': { + 'cam01': np.array + .... 
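A toy check (illustrative only, not part of the patch) of the F.grid_sample convention that point_painting above relies on: pixel coordinates are rescaled to [-1, 1] before sampling; align_corners=True is used here so the corner mapping is exact.

import torch
import torch.nn.functional as F

feat = torch.arange(12.0).reshape(1, 1, 3, 4)      # [1, C=1, H=3, W=4]
uv = torch.tensor([[[[-1.0, -1.0], [1.0, 1.0]]]])  # top-left and bottom-right corners
out = F.grid_sample(feat, uv, mode='bilinear', align_corners=True)
print(out)  # tensor([[[[ 0., 11.]]]]): (-1, -1) hits pixel (0, 0), (1, 1) hits (H-1, W-1)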
+ } + } + } + """ + def process_single_sequence(seq_idx): + print('%s seq_idx: %s' % (self.split, seq_idx)) + seq_infos = [] + seq_path = Path(root_path) / 'data' / seq_idx + json_path = seq_path / ('%s.json' % seq_idx) + with open(json_path, 'r') as f: + info_this_seq = json.load(f) + meta_info = info_this_seq['meta_info'] + calib = info_this_seq['calib'] + for f_idx, frame in enumerate(info_this_seq['frames']): + frame_id = frame['frame_id'] + if f_idx == 0: + prev_id = None + else: + prev_id = info_this_seq['frames'][f_idx-1]['frame_id'] + if f_idx == len(info_this_seq['frames'])-1: + next_id = None + else: + next_id = info_this_seq['frames'][f_idx+1]['frame_id'] + pc_path = str(seq_path / 'lidar_roof' / ('%s.bin' % frame_id)) + pose = np.array(frame['pose']) + frame_dict = { + 'sequence_id': seq_idx, + 'frame_id': frame_id, + 'timestamp': int(frame_id), + 'prev_id': prev_id, + 'next_id': next_id, + 'meta_info': meta_info, + 'lidar': pc_path, + 'pose': pose + } + calib_dict = {} + for cam_name in cam_names: + cam_path = str(seq_path / cam_name / ('%s.jpg' % frame_id)) + frame_dict.update({cam_name: cam_path}) + calib_dict[cam_name] = {} + calib_dict[cam_name]['cam_to_velo'] = np.array(calib[cam_name]['cam_to_velo']) + calib_dict[cam_name]['cam_intrinsic'] = np.array(calib[cam_name]['cam_intrinsic']) + calib_dict[cam_name]['distortion'] = np.array(calib[cam_name]['distortion']) + frame_dict.update({'calib': calib_dict}) + + if 'annos' in frame: + annos = frame['annos'] + boxes_3d = np.array(annos['boxes_3d']) + if boxes_3d.shape[0] == 0: + print(frame_id) + continue + boxes_2d_dict = {} + for cam_name in cam_names: + boxes_2d_dict[cam_name] = np.array(annos['boxes_2d'][cam_name]) + annos_dict = { + 'name': np.array(annos['names']), + 'boxes_3d': boxes_3d, + 'boxes_2d': boxes_2d_dict + } + + points = self.get_lidar(seq_idx, frame_id) + corners_lidar = box_utils.boxes_to_corners_3d(np.array(annos['boxes_3d'])) + num_gt = boxes_3d.shape[0] + num_points_in_gt = -np.ones(num_gt, dtype=np.int32) + for k in range(num_gt): + flag = box_utils.in_hull(points[:, 0:3], corners_lidar[k]) + num_points_in_gt[k] = flag.sum() + annos_dict['num_points_in_gt'] = num_points_in_gt + + frame_dict.update({'annos': annos_dict}) + seq_infos.append(frame_dict) + return seq_infos + + sample_seq_list = sample_seq_list if sample_seq_list is not None else self.sample_seq_list + with futures.ThreadPoolExecutor(num_workers) as executor: + infos = executor.map(process_single_sequence, sample_seq_list) + all_infos = [] + for info in infos: + all_infos.extend(info) + return all_infos + + def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'): + import torch + + database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split)) + db_info_save_path = Path(self.root_path) / ('once_dbinfos_%s.pkl' % split) + + database_save_path.mkdir(parents=True, exist_ok=True) + all_db_infos = {} + + with open(info_path, 'rb') as f: + infos = pickle.load(f) + + for k in range(len(infos)): + if 'annos' not in infos[k]: + continue + print('gt_database sample: %d' % (k + 1)) + info = infos[k] + frame_id = info['frame_id'] + seq_id = info['sequence_id'] + points = self.get_lidar(seq_id, frame_id) + annos = info['annos'] + names = annos['name'] + gt_boxes = annos['boxes_3d'] + + num_obj = gt_boxes.shape[0] + point_indices = roiaware_pool3d_utils.points_in_boxes_cpu( + torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes) + ).numpy() # (nboxes, npoints) + + for 
i in range(num_obj): + filename = '%s_%s_%d.bin' % (frame_id, names[i], i) + filepath = database_save_path / filename + gt_points = points[point_indices[i] > 0] + + gt_points[:, :3] -= gt_boxes[i, :3] + with open(filepath, 'w') as f: + gt_points.tofile(f) + + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': names[i], 'path': db_path, 'gt_idx': i, + 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]} + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + @staticmethod + def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None): + def get_template_prediction(num_samples): + ret_dict = { + 'name': np.zeros(num_samples), 'score': np.zeros(num_samples), + 'boxes_3d': np.zeros((num_samples, 7)) + } + return ret_dict + + def generate_single_sample_dict(box_dict): + pred_scores = box_dict['pred_scores'].cpu().numpy() + pred_boxes = box_dict['pred_boxes'].cpu().numpy() + pred_labels = box_dict['pred_labels'].cpu().numpy() + pred_dict = get_template_prediction(pred_scores.shape[0]) + if pred_scores.shape[0] == 0: + return pred_dict + + pred_dict['name'] = np.array(class_names)[pred_labels - 1] + pred_dict['score'] = pred_scores + pred_dict['boxes_3d'] = pred_boxes + return pred_dict + + annos = [] + for index, box_dict in enumerate(pred_dicts): + frame_id = batch_dict['frame_id'][index] + single_pred_dict = generate_single_sample_dict(box_dict) + single_pred_dict['frame_id'] = frame_id + annos.append(single_pred_dict) + + if output_path is not None: + raise NotImplementedError + return annos + + def evaluation(self, det_annos, class_names, **kwargs): + from .once_eval.evaluation import get_evaluation_results + + eval_det_annos = copy.deepcopy(det_annos) + eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.once_infos] + ap_result_str, ap_dict = get_evaluation_results(eval_gt_annos, eval_det_annos, class_names) + + return ap_result_str, ap_dict + +def create_once_infos(dataset_cfg, class_names, data_path, save_path, workers=4): + dataset = ONCEDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) + + splits = ['train', 'val', 'test', 'raw_small', 'raw_medium', 'raw_large'] + ignore = ['test'] + + print('---------------Start to generate data infos---------------') + for split in splits: + if split in ignore: + continue + + filename = 'once_infos_%s.pkl' % split + filename = save_path / Path(filename) + dataset.set_split(split) + once_infos = dataset.get_infos(num_workers=workers) + with open(filename, 'wb') as f: + pickle.dump(once_infos, f) + print('ONCE info %s file is saved to %s' % (split, filename)) + + train_filename = save_path / 'once_infos_train.pkl' + print('---------------Start create groundtruth database for data augmentation---------------') + dataset.set_split('train') + dataset.create_groundtruth_database(train_filename, split='train') + print('---------------Data preparation Done---------------') + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset') + parser.add_argument('--func', type=str, default='create_waymo_infos', help='') + parser.add_argument('--runs_on', type=str, 
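A short sketch for inspecting the ground-truth database written by create_groundtruth_database above (the path is an assumption following the naming in that function); these crops feed gt-sampling augmentation during training:

import pickle

with open('data/once/once_dbinfos_train.pkl', 'rb') as f:
    db_infos = pickle.load(f)
for name, infos in db_infos.items():
    # class name, number of stored crops, first crop's relative path and point count
    print(name, len(infos), infos[0]['path'], infos[0]['num_points_in_gt'])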
default='server', help='') + args = parser.parse_args() + + if args.func == 'create_once_infos': + import yaml + from pathlib import Path + from easydict import EasyDict + dataset_cfg = EasyDict(yaml.safe_load(open(args.cfg_file))) + + + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + once_data_path = ROOT_DIR / 'data' / 'once' + once_save_path = ROOT_DIR / 'data' / 'once' + + if args.runs_on == 'cloud': + once_data_path = Path('/cache/once/') + once_save_path = Path('/cache/once/') + dataset_cfg.DATA_PATH = dataset_cfg.CLOUD_DATA_PATH + + create_once_infos( + dataset_cfg=dataset_cfg, + class_names=['Car', 'Bus', 'Truck', 'Pedestrian', 'Bicycle'], + data_path=once_data_path, + save_path=once_save_path + ) \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/once/once_eval/eval_utils.py b/toolbox/openpcdet/pcdet/datasets/once/once_eval/eval_utils.py new file mode 100644 index 000000000..45263923d --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/once/once_eval/eval_utils.py @@ -0,0 +1,53 @@ +import numpy as np + + +def compute_split_parts(num_samples, num_parts): + part_samples = num_samples // num_parts + remain_samples = num_samples % num_parts + if part_samples == 0: + return [num_samples] + if remain_samples == 0: + return [part_samples] * num_parts + else: + return [part_samples] * num_parts + [remain_samples] + + +def overall_filter(boxes): + ignore = np.zeros(boxes.shape[0], dtype=bool) # all false + return ignore + + +def distance_filter(boxes, level): + ignore = np.ones(boxes.shape[0], dtype=bool) # all true + dist = np.sqrt(np.sum(boxes[:, 0:3] * boxes[:, 0:3], axis=1)) + + if level == 0: # 0-30m + flag = dist < 30 + elif level == 1: # 30-50m + flag = (dist >= 30) & (dist < 50) + elif level == 2: # 50m-inf + flag = dist >= 50 + else: + assert False, 'level < 3 for distance metric, found level %s' % (str(level)) + + ignore[flag] = False + return ignore + + +def overall_distance_filter(boxes, level): + ignore = np.ones(boxes.shape[0], dtype=bool) # all true + dist = np.sqrt(np.sum(boxes[:, 0:3] * boxes[:, 0:3], axis=1)) + + if level == 0: + flag = np.ones(boxes.shape[0], dtype=bool) + elif level == 1: # 0-30m + flag = dist < 30 + elif level == 2: # 30-50m + flag = (dist >= 30) & (dist < 50) + elif level == 3: # 50m-inf + flag = dist >= 50 + else: + assert False, 'level < 4 for overall & distance metric, found level %s' % (str(level)) + + ignore[flag] = False + return ignore \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/once/once_eval/evaluation.py b/toolbox/openpcdet/pcdet/datasets/once/once_eval/evaluation.py new file mode 100644 index 000000000..d890e93d3 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/once/once_eval/evaluation.py @@ -0,0 +1,420 @@ +""" +Evaluation Server +Written by Jiageng Mao +""" + +import numpy as np +import numba + +from .iou_utils import rotate_iou_gpu_eval +from .eval_utils import compute_split_parts, overall_filter, distance_filter, overall_distance_filter + +iou_threshold_dict = { + 'Car': 0.7, + 'Bus': 0.7, + 'Truck': 0.7, + 'Pedestrian': 0.3, + 'Cyclist': 0.5 +} + +superclass_iou_threshold_dict = { + 'Vehicle': 0.7, + 'Pedestrian': 0.3, + 'Cyclist': 0.5 +} + +def get_evaluation_results(gt_annos, pred_annos, classes, + use_superclass=True, + iou_thresholds=None, + num_pr_points=50, + difficulty_mode='Overall&Distance', + ap_with_heading=True, + num_parts=100, + print_ok=False + ): + + if iou_thresholds is None: + if use_superclass: + iou_thresholds = superclass_iou_threshold_dict
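Worked example for compute_split_parts defined in eval_utils above (pure arithmetic, easy to check by hand): the remainder becomes a final short part, and fewer samples than parts degenerate to a single part.

from pcdet.datasets.once.once_eval.eval_utils import compute_split_parts  # assumed import path

assert compute_split_parts(10, 3) == [3, 3, 3, 1]  # 10 = 3 + 3 + 3, remainder 1
assert compute_split_parts(9, 3) == [3, 3, 3]      # divides evenly
assert compute_split_parts(2, 3) == [2]            # num_samples // num_parts == 0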
+ else: + iou_thresholds = iou_threshold_dict + + assert len(gt_annos) == len(pred_annos), "the number of GT must match predictions" + assert difficulty_mode in ['Overall&Distance', 'Overall', 'Distance'], "difficulty mode is not supported" + if use_superclass: + if ('Car' in classes) or ('Bus' in classes) or ('Truck' in classes): + assert ('Car' in classes) and ('Bus' in classes) and ('Truck' in classes), "Car/Bus/Truck must all exist for vehicle detection" + classes = [cls_name for cls_name in classes if cls_name not in ['Car', 'Bus', 'Truck']] + classes.insert(0, 'Vehicle') + + num_samples = len(gt_annos) + split_parts = compute_split_parts(num_samples, num_parts) + ious = compute_iou3d(gt_annos, pred_annos, split_parts, with_heading=ap_with_heading) + + num_classes = len(classes) + if difficulty_mode == 'Distance': + num_difficulties = 3 + difficulty_types = ['0-30m', '30-50m', '50m-inf'] + elif difficulty_mode == 'Overall': + num_difficulties = 1 + difficulty_types = ['overall'] + elif difficulty_mode == 'Overall&Distance': + num_difficulties = 4 + difficulty_types = ['overall', '0-30m', '30-50m', '50m-inf'] + else: + raise NotImplementedError + + precision = np.zeros([num_classes, num_difficulties, num_pr_points+1]) + recall = np.zeros([num_classes, num_difficulties, num_pr_points+1]) + + for cls_idx, cur_class in enumerate(classes): + iou_threshold = iou_thresholds[cur_class] + for diff_idx in range(num_difficulties): + ### filter data & determine score thresholds on p-r curve ### + accum_all_scores, gt_flags, pred_flags = [], [], [] + num_valid_gt = 0 + for sample_idx in range(num_samples): + gt_anno = gt_annos[sample_idx] + pred_anno = pred_annos[sample_idx] + pred_score = pred_anno['score'] + iou = ious[sample_idx] + gt_flag, pred_flag = filter_data(gt_anno, pred_anno, difficulty_mode, + difficulty_level=diff_idx, class_name=cur_class, use_superclass=use_superclass) + gt_flags.append(gt_flag) + pred_flags.append(pred_flag) + num_valid_gt += sum(gt_flag == 0) + accum_scores = accumulate_scores(iou, pred_score, gt_flag, pred_flag, + iou_threshold=iou_threshold) + accum_all_scores.append(accum_scores) + all_scores = np.concatenate(accum_all_scores, axis=0) + thresholds = get_thresholds(all_scores, num_valid_gt, num_pr_points=num_pr_points) + + ### compute tp/fp/fn ### + confusion_matrix = np.zeros([len(thresholds), 3]) # only record tp/fp/fn + for sample_idx in range(num_samples): + pred_score = pred_annos[sample_idx]['score'] + iou = ious[sample_idx] + gt_flag, pred_flag = gt_flags[sample_idx], pred_flags[sample_idx] + for th_idx, score_th in enumerate(thresholds): + tp, fp, fn = compute_statistics(iou, pred_score, gt_flag, pred_flag, + score_threshold=score_th, iou_threshold=iou_threshold) + confusion_matrix[th_idx, 0] += tp + confusion_matrix[th_idx, 1] += fp + confusion_matrix[th_idx, 2] += fn + + ### draw p-r curve ### + for th_idx in range(len(thresholds)): + recall[cls_idx, diff_idx, th_idx] = confusion_matrix[th_idx, 0] / \ + (confusion_matrix[th_idx, 0] + confusion_matrix[th_idx, 2]) + precision[cls_idx, diff_idx, th_idx] = confusion_matrix[th_idx, 0] / \ + (confusion_matrix[th_idx, 0] + confusion_matrix[th_idx, 1]) + + for th_idx in range(len(thresholds)): + precision[cls_idx, diff_idx, th_idx] = np.max( + precision[cls_idx, diff_idx, th_idx:], axis=-1) + recall[cls_idx, diff_idx, th_idx] = np.max( + recall[cls_idx, diff_idx, th_idx:], axis=-1) + + AP = 0 + for i in range(1, precision.shape[-1]): + AP += precision[..., i] + AP = AP / num_pr_points * 100 + + ret_dict = {} + 
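A numeric sketch (toy values) of the AP reduction performed just above: AP is the mean of the precision samples at the num_pr_points recall positions (index 0 excluded), scaled to a percentage.

import numpy as np

num_pr_points = 50
precision = np.linspace(1.0, 0.5, num_pr_points + 1)  # toy monotone precision samples
AP = precision[1:].sum() / num_pr_points * 100
print(AP)  # 74.5 for this toy curve; the loop above does the same per class and difficulty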
+ ret_str = "\n|AP@%-9s|" % (str(num_pr_points)) + for diff_type in difficulty_types: + ret_str += '%-12s|' % diff_type + ret_str += '\n' + for cls_idx, cur_class in enumerate(classes): + ret_str += "|%-12s|" % cur_class + for diff_idx in range(num_difficulties): + diff_type = difficulty_types[diff_idx] + key = 'AP_' + cur_class + '/' + diff_type + ap_score = AP[cls_idx,diff_idx] + ret_dict[key] = ap_score + ret_str += "%-12.2f|" % ap_score + ret_str += "\n" + mAP = np.mean(AP, axis=0) + ret_str += "|%-12s|" % 'mAP' + for diff_idx in range(num_difficulties): + diff_type = difficulty_types[diff_idx] + key = 'AP_mean' + '/' + diff_type + ap_score = mAP[diff_idx] + ret_dict[key] = ap_score + ret_str += "%-12.2f|" % ap_score + ret_str += "\n" + + if print_ok: + print(ret_str) + return ret_str, ret_dict + +@numba.jit(nopython=True) +def get_thresholds(scores, num_gt, num_pr_points): + eps = 1e-6 + scores.sort() + scores = scores[::-1] + recall_level = 0 + thresholds = [] + for i, score in enumerate(scores): + l_recall = (i + 1) / num_gt + if i < (len(scores) - 1): + r_recall = (i + 2) / num_gt + else: + r_recall = l_recall + if (r_recall + l_recall < 2 * recall_level) and i < (len(scores) - 1): + continue + thresholds.append(score) + recall_level += 1 / num_pr_points + # avoid numerical errors + # while r_recall + l_recall >= 2 * recall_level: + while r_recall + l_recall + eps > 2 * recall_level: + thresholds.append(score) + recall_level += 1 / num_pr_points + return thresholds + +@numba.jit(nopython=True) +def accumulate_scores(iou, pred_scores, gt_flag, pred_flag, iou_threshold): + num_gt = iou.shape[0] + num_pred = iou.shape[1] + assigned = np.full(num_pred, False) + accum_scores = np.zeros(num_gt) + accum_idx = 0 + for i in range(num_gt): + if gt_flag[i] == -1: # not the same class + continue + det_idx = -1 + detected_score = -1 + for j in range(num_pred): + if pred_flag[j] == -1: # not the same class + continue + if assigned[j]: + continue + iou_ij = iou[i, j] + pred_score = pred_scores[j] + if (iou_ij > iou_threshold) and (pred_score > detected_score): + det_idx = j + detected_score = pred_score + + if (detected_score == -1) and (gt_flag[i] == 0): # false negative + pass + elif (detected_score != -1) and (gt_flag[i] == 1 or pred_flag[det_idx] == 1): # ignore + assigned[det_idx] = True + elif detected_score != -1: # true positive + accum_scores[accum_idx] = pred_scores[det_idx] + accum_idx += 1 + assigned[det_idx] = True + + return accum_scores[:accum_idx] + +@numba.jit(nopython=True) +def compute_statistics(iou, pred_scores, gt_flag, pred_flag, score_threshold, iou_threshold): + num_gt = iou.shape[0] + num_pred = iou.shape[1] + assigned = np.full(num_pred, False) + under_threshold = pred_scores < score_threshold + + tp, fp, fn = 0, 0, 0 + for i in range(num_gt): + if gt_flag[i] == -1: # different classes + continue + det_idx = -1 + detected = False + best_matched_iou = 0 + gt_assigned_to_ignore = False + + for j in range(num_pred): + if pred_flag[j] == -1: # different classes + continue + if assigned[j]: # already assigned to other GT + continue + if under_threshold[j]: # compute only boxes above threshold + continue + iou_ij = iou[i, j] + if (iou_ij > iou_threshold) and (iou_ij > best_matched_iou or gt_assigned_to_ignore) and pred_flag[j] == 0: + best_matched_iou = iou_ij + det_idx = j + detected = True + gt_assigned_to_ignore = False + elif (iou_ij > iou_threshold) and (not detected) and pred_flag[j] == 1: + det_idx = j + detected = True + gt_assigned_to_ignore = True + + if (not 
detected) and gt_flag[i] == 0: # false negative + fn += 1 + elif detected and (gt_flag[i] == 1 or pred_flag[det_idx] == 1): # ignore + assigned[det_idx] = True + elif detected: # true positive + tp += 1 + assigned[det_idx] = True + + for j in range(num_pred): + if not (assigned[j] or pred_flag[j] == -1 or pred_flag[j] == 1 or under_threshold[j]): + fp += 1 + + return tp, fp, fn + +def filter_data(gt_anno, pred_anno, difficulty_mode, difficulty_level, class_name, use_superclass): + """ + Filter data by class name and difficulty + + Args: + gt_anno: + pred_anno: + difficulty_mode: + difficulty_level: + class_name: + + Returns: + gt_flags/pred_flags: + 1 : same class but ignored with different difficulty levels + 0 : accepted + -1 : rejected with different classes + """ + num_gt = len(gt_anno['name']) + gt_flag = np.zeros(num_gt, dtype=np.int64) + if use_superclass: + if class_name == 'Vehicle': + reject = np.logical_or(gt_anno['name']=='Pedestrian', gt_anno['name']=='Cyclist') + else: + reject = gt_anno['name'] != class_name + else: + reject = gt_anno['name'] != class_name + gt_flag[reject] = -1 + num_pred = len(pred_anno['name']) + pred_flag = np.zeros(num_pred, dtype=np.int64) + if use_superclass: + if class_name == 'Vehicle': + reject = np.logical_or(pred_anno['name']=='Pedestrian', pred_anno['name']=='Cyclist') + else: + reject = pred_anno['name'] != class_name + else: + reject = pred_anno['name'] != class_name + pred_flag[reject] = -1 + + if difficulty_mode == 'Overall': + ignore = overall_filter(gt_anno['boxes_3d']) + gt_flag[ignore] = 1 + ignore = overall_filter(pred_anno['boxes_3d']) + pred_flag[ignore] = 1 + elif difficulty_mode == 'Distance': + ignore = distance_filter(gt_anno['boxes_3d'], difficulty_level) + gt_flag[ignore] = 1 + ignore = distance_filter(pred_anno['boxes_3d'], difficulty_level) + pred_flag[ignore] = 1 + elif difficulty_mode == 'Overall&Distance': + ignore = overall_distance_filter(gt_anno['boxes_3d'], difficulty_level) + gt_flag[ignore] = 1 + ignore = overall_distance_filter(pred_anno['boxes_3d'], difficulty_level) + pred_flag[ignore] = 1 + else: + raise NotImplementedError + + return gt_flag, pred_flag + +def iou3d_kernel(gt_boxes, pred_boxes): + """ + Core iou3d computation (with cuda) + + Args: + gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates + pred_boxes: [M, 7] + + Returns: + iou3d: [N, M] + """ + intersection_2d = rotate_iou_gpu_eval(gt_boxes[:, [0, 1, 3, 4, 6]], pred_boxes[:, [0, 1, 3, 4, 6]], criterion=2) + gt_max_h = gt_boxes[:, [2]] + gt_boxes[:, [5]] * 0.5 + gt_min_h = gt_boxes[:, [2]] - gt_boxes[:, [5]] * 0.5 + pred_max_h = pred_boxes[:, [2]] + pred_boxes[:, [5]] * 0.5 + pred_min_h = pred_boxes[:, [2]] - pred_boxes[:, [5]] * 0.5 + max_of_min = np.maximum(gt_min_h, pred_min_h.T) + min_of_max = np.minimum(gt_max_h, pred_max_h.T) + inter_h = min_of_max - max_of_min + inter_h[inter_h <= 0] = 0 + #inter_h[intersection_2d <= 0] = 0 + intersection_3d = intersection_2d * inter_h + gt_vol = gt_boxes[:, [3]] * gt_boxes[:, [4]] * gt_boxes[:, [5]] + pred_vol = pred_boxes[:, [3]] * pred_boxes[:, [4]] * pred_boxes[:, [5]] + union_3d = gt_vol + pred_vol.T - intersection_3d + #eps = 1e-6 + #union_3d[union_3d<eps] = eps + iou3d = intersection_3d / union_3d + return iou3d + +def iou3d_kernel_with_heading(gt_boxes, pred_boxes): + """ + Core iou3d computation with the heading constraint + + Args: + gt_boxes: [N, 7] (x, y, z, w, l, h, rot) in Lidar coordinates + pred_boxes: [M, 7] + + Returns: + iou3d: [N, M] + """ + iou3d = iou3d_kernel(gt_boxes, pred_boxes) + gt_heading = gt_boxes[:, 6].reshape(-1, 1) + pred_heading = pred_boxes[:, 6].reshape(1, -1) + diff_rot = np.abs(gt_heading - pred_heading) + reverse_diff_rot = 2 * np.pi - diff_rot + diff_rot[diff_rot >= np.pi] = reverse_diff_rot[diff_rot >= np.pi] # constrain to [0-pi] + iou3d[diff_rot > np.pi/2] = 0 # unmatched if diff_rot > 90 + return iou3d + +def compute_iou3d(gt_annos, pred_annos, split_parts, with_heading): + """ + Compute iou3d of all samples by parts + + Args: + with_heading: filter with heading + gt_annos: list of dicts for each sample + 
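A hedged sanity check for the two IoU kernels above (this needs a CUDA device, since the BEV overlap comes from rotate_iou_gpu_eval): identical boxes give IoU ~1, and flipping a heading by pi zeroes the match in the heading-aware variant.

import numpy as np

box = np.array([[0.0, 0.0, 1.0, 4.0, 2.0, 1.5, 0.3]])  # x, y, z, w, l, h, rot
print(iou3d_kernel(box, box))                          # -> [[1.]]
flipped = box.copy(); flipped[0, 6] += np.pi
print(iou3d_kernel_with_heading(box, flipped))         # -> [[0.]] since diff_rot == pi > pi/2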
pred_annos: + split_parts: for part-based iou computation + + Returns: + ious: list of iou arrays for each sample + """ + gt_num_per_sample = np.stack([len(anno["name"]) for anno in gt_annos], 0) + pred_num_per_sample = np.stack([len(anno["name"]) for anno in pred_annos], 0) + ious = [] + sample_idx = 0 + for num_part_samples in split_parts: + gt_annos_part = gt_annos[sample_idx:sample_idx + num_part_samples] + pred_annos_part = pred_annos[sample_idx:sample_idx + num_part_samples] + + gt_boxes = np.concatenate([anno["boxes_3d"] for anno in gt_annos_part], 0) + pred_boxes = np.concatenate([anno["boxes_3d"] for anno in pred_annos_part], 0) + + if with_heading: + iou3d_part = iou3d_kernel_with_heading(gt_boxes, pred_boxes) + else: + iou3d_part = iou3d_kernel(gt_boxes, pred_boxes) + + gt_num_idx, pred_num_idx = 0, 0 + for idx in range(num_part_samples): + gt_box_num = gt_num_per_sample[sample_idx + idx] + pred_box_num = pred_num_per_sample[sample_idx + idx] + ious.append(iou3d_part[gt_num_idx: gt_num_idx + gt_box_num, pred_num_idx: pred_num_idx+pred_box_num]) + gt_num_idx += gt_box_num + pred_num_idx += pred_box_num + sample_idx += num_part_samples + return ious \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/once/once_eval/iou_utils.py b/toolbox/openpcdet/pcdet/datasets/once/once_eval/iou_utils.py new file mode 100644 index 000000000..927056e58 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/once/once_eval/iou_utils.py @@ -0,0 +1,344 @@ +""" +Rotate IoU computation is referred from https://github.com/hongzhenwang/RRPN-revise +""" +import math +import numba +import numpy as np +from numba import cuda + +@numba.jit(nopython=True) +def div_up(m, n): + return m // n + (m % n > 0) + + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def trangle_area(a, b, c): + return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) * + (b[0] - c[0])) / 2.0 + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def area(int_pts, num_of_inter): + area_val = 0.0 + for i in range(num_of_inter - 2): + area_val += abs( + trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4], + int_pts[2 * i + 4:2 * i + 6])) + return area_val + + +@cuda.jit('(float32[:], int32)', device=True, inline=True) +def sort_vertex_in_convex_polygon(int_pts, num_of_inter): + if num_of_inter > 0: + center = cuda.local.array((2,), dtype=numba.float32) + center[:] = 0.0 + for i in range(num_of_inter): + center[0] += int_pts[2 * i] + center[1] += int_pts[2 * i + 1] + center[0] /= num_of_inter + center[1] /= num_of_inter + v = cuda.local.array((2,), dtype=numba.float32) + vs = cuda.local.array((16,), dtype=numba.float32) + for i in range(num_of_inter): + v[0] = int_pts[2 * i] - center[0] + v[1] = int_pts[2 * i + 1] - center[1] + d = math.sqrt(v[0] * v[0] + v[1] * v[1]) + v[0] = v[0] / d + v[1] = v[1] / d + if v[1] < 0: + v[0] = -2 - v[0] + vs[i] = v[0] + j = 0 + temp = 0 + for i in range(1, num_of_inter): + if vs[i - 1] > vs[i]: + temp = vs[i] + tx = int_pts[2 * i] + ty = int_pts[2 * i + 1] + j = i + while j > 0 and vs[j - 1] > temp: + vs[j] = vs[j - 1] + int_pts[j * 2] = int_pts[j * 2 - 2] + int_pts[j * 2 + 1] = int_pts[j * 2 - 1] + j -= 1 + + vs[j] = temp + int_pts[j * 2] = tx + int_pts[j * 2 + 1] = ty + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection(pts1, pts2, i, j, temp_pts): + A = cuda.local.array((2,), dtype=numba.float32) + B = cuda.local.array((2,), dtype=numba.float32) + C = 
cuda.local.array((2,), dtype=numba.float32) + D = cuda.local.array((2,), dtype=numba.float32) + + A[0] = pts1[2 * i] + A[1] = pts1[2 * i + 1] + + B[0] = pts1[2 * ((i + 1) % 4)] + B[1] = pts1[2 * ((i + 1) % 4) + 1] + + C[0] = pts2[2 * j] + C[1] = pts2[2 * j + 1] + + D[0] = pts2[2 * ((j + 1) % 4)] + D[1] = pts2[2 * ((j + 1) % 4) + 1] + BA0 = B[0] - A[0] + BA1 = B[1] - A[1] + DA0 = D[0] - A[0] + CA0 = C[0] - A[0] + DA1 = D[1] - A[1] + CA1 = C[1] - A[1] + acd = DA1 * CA0 > CA1 * DA0 + bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0]) + if acd != bcd: + abc = CA1 * BA0 > BA1 * CA0 + abd = DA1 * BA0 > BA1 * DA0 + if abc != abd: + DC0 = D[0] - C[0] + DC1 = D[1] - C[1] + ABBA = A[0] * B[1] - B[0] * A[1] + CDDC = C[0] * D[1] - D[0] * C[1] + DH = BA1 * DC0 - BA0 * DC1 + Dx = ABBA * DC0 - BA0 * CDDC + Dy = ABBA * DC1 - BA1 * CDDC + temp_pts[0] = Dx / DH + temp_pts[1] = Dy / DH + return True + return False + + +@cuda.jit( + '(float32[:], float32[:], int32, int32, float32[:])', + device=True, + inline=True) +def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts): + a = cuda.local.array((2,), dtype=numba.float32) + b = cuda.local.array((2,), dtype=numba.float32) + c = cuda.local.array((2,), dtype=numba.float32) + d = cuda.local.array((2,), dtype=numba.float32) + + a[0] = pts1[2 * i] + a[1] = pts1[2 * i + 1] + + b[0] = pts1[2 * ((i + 1) % 4)] + b[1] = pts1[2 * ((i + 1) % 4) + 1] + + c[0] = pts2[2 * j] + c[1] = pts2[2 * j + 1] + + d[0] = pts2[2 * ((j + 1) % 4)] + d[1] = pts2[2 * ((j + 1) % 4) + 1] + + area_abc = trangle_area(a, b, c) + area_abd = trangle_area(a, b, d) + + if area_abc * area_abd >= 0: + return False + + area_cda = trangle_area(c, d, a) + area_cdb = area_cda + area_abc - area_abd + + if area_cda * area_cdb >= 0: + return False + t = area_cda / (area_abd - area_abc) + + dx = t * (b[0] - a[0]) + dy = t * (b[1] - a[1]) + temp_pts[0] = a[0] + dx + temp_pts[1] = a[1] + dy + return True + +""" +@cuda.jit('(float32, float32, float32[:])', device=True, inline=True) +def point_in_quadrilateral(pt_x, pt_y, corners): + ab0 = corners[2] - corners[0] + ab1 = corners[3] - corners[1] + + ad0 = corners[6] - corners[0] + ad1 = corners[7] - corners[1] + + ap0 = pt_x - corners[0] + ap1 = pt_y - corners[1] + + abab = ab0 * ab0 + ab1 * ab1 + abap = ab0 * ap0 + ab1 * ap1 + adad = ad0 * ad0 + ad1 * ad1 + adap = ad0 * ap0 + ad1 * ap1 + + return abab >= abap and abap >= 0 and adad >= adap and adap >= 0 +""" + +@cuda.jit('(float32, float32, float32[:])', device=True, inline=True) +def point_in_quadrilateral(pt_x, pt_y, corners): + PA0 = corners[0] - pt_x + PA1 = corners[1] - pt_y + PB0 = corners[2] - pt_x + PB1 = corners[3] - pt_y + PC0 = corners[4] - pt_x + PC1 = corners[5] - pt_y + PD0 = corners[6] - pt_x + PD1 = corners[7] - pt_y + PAB = PA0 * PB1 - PB0 * PA1 + PBC = PB0 * PC1 - PC0 * PB1 + PCD = PC0 * PD1 - PD0 * PC1 + PDA = PD0 * PA1 - PA0 * PD1 + return PAB >= 0 and PBC >= 0 and PCD >= 0 and PDA >= 0 or \ + PAB <= 0 and PBC <= 0 and PCD <= 0 and PDA <= 0 + +@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True) +def quadrilateral_intersection(pts1, pts2, int_pts): + num_of_inter = 0 + for i in range(4): + if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2): + int_pts[num_of_inter * 2] = pts1[2 * i] + int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1] + num_of_inter += 1 + if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1): + int_pts[num_of_inter * 2] = pts2[2 * i] + int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1] + num_of_inter += 1 + temp_pts = 
cuda.local.array((2,), dtype=numba.float32) + for i in range(4): + for j in range(4): + has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts) + if has_pts: + int_pts[num_of_inter * 2] = temp_pts[0] + int_pts[num_of_inter * 2 + 1] = temp_pts[1] + num_of_inter += 1 + + return num_of_inter + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def rbbox_to_corners(corners, rbbox): + # generate clockwise corners and rotate it clockwise + angle = rbbox[4] + a_cos = math.cos(angle) + a_sin = math.sin(angle) + center_x = rbbox[0] + center_y = rbbox[1] + x_d = rbbox[2] + y_d = rbbox[3] + corners_x = cuda.local.array((4,), dtype=numba.float32) + corners_y = cuda.local.array((4,), dtype=numba.float32) + corners_x[0] = -x_d / 2 + corners_x[1] = -x_d / 2 + corners_x[2] = x_d / 2 + corners_x[3] = x_d / 2 + corners_y[0] = -y_d / 2 + corners_y[1] = y_d / 2 + corners_y[2] = y_d / 2 + corners_y[3] = -y_d / 2 + for i in range(4): + corners[2 * + i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x + corners[2 * i + + 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y + + +@cuda.jit('(float32[:], float32[:])', device=True, inline=True) +def inter(rbbox1, rbbox2): + corners1 = cuda.local.array((8,), dtype=numba.float32) + corners2 = cuda.local.array((8,), dtype=numba.float32) + intersection_corners = cuda.local.array((16,), dtype=numba.float32) + + rbbox_to_corners(corners1, rbbox1) + rbbox_to_corners(corners2, rbbox2) + + num_intersection = quadrilateral_intersection(corners1, corners2, + intersection_corners) + sort_vertex_in_convex_polygon(intersection_corners, num_intersection) + # print(intersection_corners.reshape([-1, 2])[:num_intersection]) + + return area(intersection_corners, num_intersection) + + +@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True) +def devRotateIoUEval(rbox1, rbox2, criterion=-1): + area1 = rbox1[2] * rbox1[3] + area2 = rbox2[2] * rbox2[3] + area_inter = inter(rbox1, rbox2) + if criterion == -1: + return area_inter / (area1 + area2 - area_inter) + elif criterion == 0: + return area_inter / area1 + elif criterion == 1: + return area_inter / area2 + else: + return area_inter + + +@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False) +def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1): + threadsPerBlock = 8 * 8 + row_start = cuda.blockIdx.x + col_start = cuda.blockIdx.y + tx = cuda.threadIdx.x + row_size = min(N - row_start * threadsPerBlock, threadsPerBlock) + col_size = min(K - col_start * threadsPerBlock, threadsPerBlock) + block_boxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) + block_qboxes = cuda.shared.array(shape=(64 * 5,), dtype=numba.float32) + + dev_query_box_idx = threadsPerBlock * col_start + tx + dev_box_idx = threadsPerBlock * row_start + tx + if (tx < col_size): + block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0] + block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1] + block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2] + block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3] + block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4] + if (tx < row_size): + block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0] + block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1] + block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2] + block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3] + block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4] + 
cuda.syncthreads() + if tx < row_size: + for i in range(col_size): + offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i + dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5], + block_boxes[tx * 5:tx * 5 + 5], criterion) + + +def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0): + """rotated box iou running in gpu. 500x faster than cpu version + (take 5ms in one example with numba.cuda code). + convert from [this project]( + https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation). + + Args: + boxes (float tensor: [N, 5]): rbboxes. format: centers, dims, + angles(clockwise when positive) + query_boxes (float tensor: [K, 5]): [description] + device_id (int, optional): Defaults to 0. [description] + + Returns: + [type]: [description] + """ + box_dtype = boxes.dtype + boxes = boxes.astype(np.float32) + query_boxes = query_boxes.astype(np.float32) + N = boxes.shape[0] + K = query_boxes.shape[0] + iou = np.zeros((N, K), dtype=np.float32) + if N == 0 or K == 0: + return iou + threadsPerBlock = 8 * 8 + cuda.select_device(device_id) + blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock)) + + stream = cuda.stream() + with stream.auto_synchronize(): + boxes_dev = cuda.to_device(boxes.reshape([-1]), stream) + query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream) + iou_dev = cuda.to_device(iou.reshape([-1]), stream) + rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream]( + N, K, boxes_dev, query_boxes_dev, iou_dev, criterion) + iou_dev.copy_to_host(iou.reshape([-1]), stream=stream) + return iou.astype(boxes.dtype) \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/once/once_toolkits.py b/toolbox/openpcdet/pcdet/datasets/once/once_toolkits.py new file mode 100644 index 000000000..ee5666f10 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/once/once_toolkits.py @@ -0,0 +1,125 @@ +import json +import os.path as osp +from collections import defaultdict +import cv2 +import numpy as np + +class Octopus(object): + """ + dataset structure: + - data_root + - train_split.txt + - val_split.txt + - test_split.txt + - + """ + camera_names = ['cam01', 'cam03', 'cam05', 'cam06', 'cam07', 'cam08', 'cam09'] + camera_tags = ['top', 'top2', 'left_back', 'left_front', 'right_front', 'right_back', 'back'] + + def __init__(self, dataset_root): + self.dataset_root = dataset_root + self.data_root = osp.join(self.dataset_root, 'data') + self._collect_basic_infos() + + @property + def train_split_list(self): + if not osp.isfile(osp.join(self.dataset_root, 'ImageSets', 'train_set.txt')): + train_split_list = None + else: + train_split_list = set(map(lambda x: x.strip(), + open(osp.join(self.data_root, 'train_set.txt')).readlines())) + return train_split_list + + @property + def val_split_list(self): + if not osp.isfile(osp.join(self.dataset_root, 'ImageSets', 'val_set.txt')): + val_split_list = None + else: + val_split_list = set(map(lambda x: x.strip(), + open(osp.join(self.data_root, 'val_set.txt')).readlines())) + return val_split_list + + @property + def test_split_list(self): + if not osp.isfile(osp.join(self.dataset_root, 'ImageSets', 'test_set.txt')): + test_split_list = None + else: + test_split_list = set(map(lambda x: x.strip(), + open(osp.join(self.data_root, 'test_set.txt')).readlines())) + return test_split_list + + @property + def raw_split_list(self): + if not osp.isfile(osp.join(self.dataset_root, 'ImageSets', 'raw_set.txt')): + raw_split_list = None + else: + 
raw_split_list = set(map(lambda x: x.strip(), + open(osp.join(self.data_root, 'raw_set.txt')).readlines())) + return raw_split_list + + def _find_split_name(self, seq_id): + if seq_id in self.raw_split_list: + return 'raw' + if seq_id in self.train_split_list: + return 'train' + if seq_id in self.test_split_list: + return 'test' + if seq_id in self.val_split_list: + return 'val' + print("sequence id {} corresponding to no split".format(seq_id)) + raise NotImplementedError + + def _collect_basic_infos(self): + self.train_info = defaultdict(dict) + if self.train_split_list is not None: + for train_seq in self.train_split_list: + anno_file_path = osp.join(self.data_root, train_seq, '{}.json'.format(train_seq)) + if not osp.isfile(anno_file_path): + print("no annotation file for sequence {}".format(train_seq)) + raise FileNotFoundError + anno_file = json.load(open(anno_file_path, 'r')) + for frame_anno in anno_file['frames']: + self.train_info[train_seq][frame_anno['frame_id']] = { + 'pose': frame_anno['pose'], + 'calib': anno_file['calib'], + } + + def get_frame_anno(self, seq_id, frame_id): + split_name = self._find_split_name(seq_id) + frame_info = getattr(self, '{}_info'.format(split_name))[seq_id][frame_id] + if 'anno' in frame_info: + return frame_info['anno'] + return None + + def load_point_cloud(self, seq_id, frame_id): + bin_path = osp.join(self.data_root, seq_id, 'lidar_roof', '{}.bin'.format(frame_id)) + points = np.fromfile(bin_path, dtype=np.float32).reshape(-1, 4) + return points + + def load_image(self, seq_id, frame_id, cam_name): + cam_path = osp.join(self.data_root, seq_id, cam_name, '{}.jpg'.format(frame_id)) + img_buf = cv2.cvtColor(cv2.imread(cam_path), cv2.COLOR_BGR2RGB) + return img_buf + + def project_lidar_to_image(self, seq_id, frame_id): + points = self.load_point_cloud(seq_id, frame_id) + + split_name = self._find_split_name(seq_id) + frame_info = getattr(self, '{}_info'.format(split_name))[seq_id][frame_id] + points_img_dict = dict() + for cam_name in self.__class__.camera_names: + calib_info = frame_info['calib'][cam_name] + cam_2_velo = calib_info['cam_to_velo'] + cam_intri = calib_info['cam_intrinsic'] + point_xyz = points[:, :3] + points_homo = np.hstack( + [point_xyz, np.ones(point_xyz.shape[0], dtype=np.float32).reshape((-1, 1))]) + points_lidar = np.dot(points_homo, np.linalg.inv(cam_2_velo).T) + mask = points_lidar[:, 2] > 0 + points_lidar = points_lidar[mask] + points_img = np.dot(points_lidar, cam_intri.T) + points_img_dict[cam_name] = points_img + return points_img_dict + + def undistort_image(self, seq_id, frame_id): + pass \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/datasets/pandaset/__init__.py b/toolbox/openpcdet/pcdet/datasets/pandaset/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/pandaset/pandaset_dataset.py b/toolbox/openpcdet/pcdet/datasets/pandaset/pandaset_dataset.py new file mode 100644 index 000000000..b3c86d51e --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/pandaset/pandaset_dataset.py @@ -0,0 +1,489 @@ +""" + Dataset from Pandaset (Hesai) +""" + +import pickle +import os +try: + import pandas as pd + import pandaset as ps +except: + pass +import numpy as np + +from ..dataset import DatasetTemplate +from ...ops.roiaware_pool3d import roiaware_pool3d_utils + +import torch + + +def pose_dict_to_numpy(pose): + """ + Conert pandaset pose dict to a numpy vector in order to pass it through the network + """ + pose_np = [pose["position"]["x"], + 
pose["position"]["y"],
+               pose["position"]["z"],
+               pose["heading"]["w"],
+               pose["heading"]["x"],
+               pose["heading"]["y"],
+               pose["heading"]["z"]]
+    # resulting layout: [x, y, z, qw, qx, qy, qz], i.e. position followed by
+    # the heading quaternion; pose_numpy_to_dict below is the inverse
+
+    return pose_np
+
+
+def pose_numpy_to_dict(pose):
+    """
+    Convert a numpy pose vector back to a pandaset pose dict after it has been passed through the network
+    """
+    pose_dict = {'position':
+                 {'x': pose[0],
+                  'y': pose[1],
+                  'z': pose[2]},
+                 'heading':
+                 {'w': pose[3],
+                  'x': pose[4],
+                  'y': pose[5],
+                  'z': pose[6]}}
+
+    return pose_dict
+
+
+class PandasetDataset(DatasetTemplate):
+    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
+        """
+        Args:
+            root_path:
+            dataset_cfg:
+            class_names:
+            training:
+            logger:
+        """
+        super().__init__(
+            dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
+        )
+        if root_path is None:
+            root_path = self.dataset_cfg.DATA_PATH
+        self.dataset = ps.DataSet(os.path.join(root_path, 'dataset'))
+        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
+        self.pandaset_infos = []
+        self.include_pandaset_infos(self.mode)
+
+
+    def include_pandaset_infos(self, mode):
+        if self.logger is not None:
+            self.logger.info('Loading PandaSet dataset')
+        pandaset_infos = []
+
+        for info_path in self.dataset_cfg.INFO_PATH[mode]:
+            info_path = os.path.join(self.root_path, info_path)
+            if not os.path.exists(info_path):
+                continue
+            with open(info_path, 'rb') as f:
+                infos = pickle.load(f)
+            pandaset_infos.extend(infos)
+
+        self.pandaset_infos.extend(pandaset_infos)
+
+        if self.logger is not None:
+            self.logger.info('Total samples for PandaSet dataset ({}): {}'.format(self.mode, len(pandaset_infos)))
+
+
+    def set_split(self, split):
+        self.sequences = self.dataset_cfg.SEQUENCES[split]
+        self.split = split
+
+
+    def __len__(self):
+        return len(self.pandaset_infos)
+
+
+    def __getitem__(self, index):
+        """
+        To support a custom dataset, implement this function to load the raw data (and labels), then transform them
+        to the unified normative coordinate frame (x pointing forward, z pointing upward) and call
+        self.prepare_data() to process the data and send it to the model.
+
+        Args:
+            index:
+
+        Returns:
+
+        """
+        info = self.pandaset_infos[index]
+        seq_idx = info['sequence']
+
+        pose = self._get_pose(info)
+        points = self._get_lidar_points(info, pose)
+        boxes, labels, zrot_world_to_ego = self._get_annotations(info, pose)
+        pose_np = pose_dict_to_numpy(pose)
+
+        input_dict = {'points': points,
+                      'gt_boxes': boxes,
+                      'gt_names': labels,
+                      'sequence': int(seq_idx),
+                      'frame_idx': info['frame_idx'],
+                      'zrot_world_to_ego': zrot_world_to_ego,
+                      'pose': pose_np
+                      }
+        # seq_idx is converted to int because strings can't be passed to
+        # the GPU in PyTorch
+        # zrot_world_to_ego is propagated in order to be able to transform the
+        # predicted yaws back to world coordinates
+
+        data_dict = self.prepare_data(data_dict=input_dict)
+
+        return data_dict
+
+
+    def _get_pose(self, info):
+        seq_idx = info['sequence']
+        # get pose for world to ego frame transformation
+        if self.dataset[seq_idx].lidar.poses is None:
+            self.dataset[seq_idx].lidar._load_poses()
+
+        pose = self.dataset[seq_idx].lidar.poses[info['frame_idx']]
+
+        return pose
+
+
+    def _get_lidar_points(self, info, pose):
+        """
+        Get lidar points in the unified normative coordinate system for a given frame.
+        The intensity is normalized to the [0-1] range (pandaset intensity is in the [0-255] range).
+        """
+        # get lidar points
+        lidar_frame = pd.read_pickle(info['lidar_path'])
+        # get points for the required lidar(s) only
+        device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
+        if device != -1:
+            lidar_frame = lidar_frame[lidar_frame.d == device]
+        world_points = lidar_frame.to_numpy()
+        # There seem to be issues with the automatic deletion of pandas datasets sometimes
+        del lidar_frame
+
+        points_loc = world_points[:, :3]
+        points_int = world_points[:, 3]
+
+        # normalize intensity
+        points_int = points_int / 255
+
+        ego_points = ps.geometry.lidar_points_to_ego(points_loc, pose)
+        # Pandaset ego coordinates are:
+        #   - x pointing to the right
+        #   - y pointing to the front
+        #   - z pointing up
+        # Normative coordinates are:
+        #   - x pointing forward
+        #   - y pointing to the left
+        #   - z pointing to the top
+        # So a transformation is required to match the normative coordinates
+        ego_points = ego_points[:, [1, 0, 2]]  # switch x and y
+        ego_points[:, 1] = -ego_points[:, 1]  # flip y axis
+
+        return np.append(ego_points, np.expand_dims(points_int, axis=1), axis=1).astype(np.float32)
+
+
+    def _get_annotations(self, info, pose):
+        """
+        Get box information in the unified normative coordinate system for a given frame
+        """
+
+        # get boxes
+        cuboids = pd.read_pickle(info["cuboids_path"])
+        device = self.dataset_cfg.get('LIDAR_DEVICE', 0)
+        if device != -1:
+            # keep cuboids that are seen by a given device
+            cuboids = cuboids[cuboids["cuboids.sensor_id"] != 1 - device]
+
+        xs = cuboids['position.x'].to_numpy()
+        ys = cuboids['position.y'].to_numpy()
+        zs = cuboids['position.z'].to_numpy()
+        dxs = cuboids['dimensions.x'].to_numpy()
+        dys = cuboids['dimensions.y'].to_numpy()
+        dzs = cuboids['dimensions.z'].to_numpy()
+        yaws = cuboids['yaw'].to_numpy()
+        labels = cuboids['label'].to_numpy()
+
+        del cuboids  # There seem to be issues with the automatic deletion of pandas datasets sometimes
+
+        labels = np.array([self.dataset_cfg.TRAINING_CATEGORIES.get(lab, lab)
+                           for lab in labels])
+
+        # Compute the center points coordinates in ego coordinates
+        centers = np.vstack([xs, ys, zs]).T
+        ego_centers = ps.geometry.lidar_points_to_ego(centers, pose)
+
+        # Compute the yaw in ego coordinates
+        # The following implementation assumes that the pitch of the car is
+        # negligible compared to its yaw, in order to be able to express the
+        # bbox coordinates in the ego coordinate system with an {axis aligned
+        # box + yaw} only representation
+        yaxis_points_from_pose = ps.geometry.lidar_points_to_ego(np.array([[0, 0, 0], [0, 1., 0]]), pose)
+        yaxis_from_pose = yaxis_points_from_pose[1, :] - yaxis_points_from_pose[0, :]
+
+        if yaxis_from_pose[-1] >= 10**-1:
+            if self.logger is not None:
+                self.logger.warning("The car's pitch is supposed to be negligible, "
+                                    + "but sin(pitch) is >= 10**-1 ({})".format(yaxis_from_pose[-1]))
+
+        # rotation angle in radians of the y axis around the z axis
+        zrot_world_to_ego = np.arctan2(-yaxis_from_pose[0], yaxis_from_pose[1])
+        ego_yaws = yaws + zrot_world_to_ego
+
+        # Pandaset ego coordinates are:
+        #   - x pointing to the right
+        #   - y pointing to the front
+        #   - z pointing up
+        # Normative coordinates are:
+        #   - x pointing forward
+        #   - y pointing to the left
+        #   - z pointing to the top
+        # So a transformation is required to match the normative coordinates
+        ego_xs = ego_centers[:, 1]
+        ego_ys = -ego_centers[:, 0]
+        ego_zs = ego_centers[:, 2]
+        ego_dxs = dys
+        ego_dys = dxs  # stays >= 0
+        ego_dzs = dzs
+
+        ego_boxes = np.vstack([ego_xs, ego_ys, ego_zs, ego_dxs, ego_dys, ego_dzs, ego_yaws]).T
+
+        return ego_boxes.astype(np.float32), labels, zrot_world_to_ego
+
+
+    @staticmethod
+    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
+        """
+        To support a custom dataset, implement this function to receive the predicted results from the model, and then
+        transform the unified normative coordinates to your required coordinates, and optionally save them to disk.
+
+        Args:
+            batch_dict: dict of original data from the dataloader
+            pred_dicts: list of predicted results from the model
+                pred_boxes: (N, 7), Tensor
+                pred_scores: (N), Tensor
+                pred_labels: (N), Tensor
+            class_names:
+            output_path: if it is not None, save the results to this path
+        Returns:
+
+        """
+
+        def generate_single_sample_dataframe(batch_index, box_dict, zrot_world_to_ego, pose):
+            pred_boxes = box_dict["pred_boxes"].cpu().numpy()
+            pred_scores = box_dict["pred_scores"].cpu().numpy()
+            pred_labels = box_dict["pred_labels"].cpu().numpy()
+            zrot = zrot_world_to_ego.cpu().numpy()
+            pose_dict = pose_numpy_to_dict(pose.cpu().numpy())
+
+            xs = pred_boxes[:, 0]
+            ys = pred_boxes[:, 1]
+            zs = pred_boxes[:, 2]
+            dxs = pred_boxes[:, 3]
+            dys = pred_boxes[:, 4]
+            dzs = pred_boxes[:, 5]
+            yaws = pred_boxes[:, 6]
+            names = np.array(class_names)[pred_labels - 1]  # Predicted labels start at 1
+
+            # convert from normative coordinates to pandaset ego coordinates
+            ego_xs = -ys
+            ego_ys = xs
+            ego_zs = zs
+            ego_dxs = dys
+            ego_dys = dxs
+            ego_dzs = dzs
+            ego_yaws = yaws
+
+            # convert from pandaset ego coordinates to world coordinates
+            # for the moment, a simplified estimate of the ego yaw is computed in __getitem__,
+            # which sets ego_yaw = world_yaw + zrot_world_to_ego
+            world_yaws = ego_yaws - zrot
+
+            ego_centers = np.vstack([ego_xs, ego_ys, ego_zs]).T
+            world_centers = ps.geometry.ego_to_lidar_points(ego_centers, pose_dict)
+            world_xs = world_centers[:, 0]
+            world_ys = world_centers[:, 1]
+            world_zs = world_centers[:, 2]
+            # dx, dy, dz remain unchanged as the bbox orientation is handled by
+            # the yaw information
+
+            data_dict = {'position.x': world_xs,
+                         'position.y': world_ys,
+                         'position.z': world_zs,
+                         'dimensions.x': ego_dxs,
+                         'dimensions.y': ego_dys,
+                         'dimensions.z': ego_dzs,
+                         'yaw': world_yaws %
(2 * np.pi), + 'label': names, + 'score': pred_scores + } + + return pd.DataFrame(data_dict) + + + annos = [] + for index, box_dict in enumerate(pred_dicts): + frame_idx = batch_dict['frame_idx'][index] + seq_idx = batch_dict['sequence'][index] + zrot = batch_dict['zrot_world_to_ego'][index] + pose = batch_dict['pose'][index] + + single_pred_df = generate_single_sample_dataframe(index, box_dict, zrot, pose) + + + single_pred_dict = {'preds' : single_pred_df, + # 'name 'ensures testing the number of detections in a compatible format as kitti + 'name' : single_pred_df['label'].tolist(), + 'frame_idx': frame_idx, + 'sequence': str(seq_idx).zfill(3)} + # seq_idx was converted to int in self.__getitem__` because strings + # can't be passed to the gpu in pytorch. + # To convert it back to a string, we assume that the sequence is + # provided in pandaset format with 3 digits + + if output_path is not None: + frame_id = str(int(frame_idx)).zfill(2) + seq_id = str(int(seq_idx)).zfill(3) + cur_det_file = os.path.join(output_path, seq_id, 'predictions', + 'cuboids', ("{}.pkl.gz".format(frame_id))) + os.makedirs(os.path.dirname(cur_det_file), exist_ok=True) + single_pred_df.to_pickle(cur_det_file) + + annos.append(single_pred_dict) + + return annos + + + def get_infos(self): + """ + Generate the dataset infos dict for each sample of the dataset. + For each sample, this dict contains: + - the sequence index + - the frame index + - the path to the lidar data + - the path to the bounding box annotations + """ + infos = [] + for seq in self.sequences: + s = self.dataset[seq] + s.load_lidar() + if len(s.lidar.data) > 100: + raise ValueError("The implementation for this dataset assumes that each sequence is " + + "no longer than 100 frames. The current sequence has {}".format(len(s.lidar.data))) + info = [{'sequence': seq, + 'frame_idx': ii, + 'lidar_path': os.path.join(self.root_path, 'dataset', seq, 'lidar', ("{:02d}.pkl.gz".format(ii))), + 'cuboids_path': os.path.join(self.root_path, 'dataset', seq, + 'annotations', 'cuboids', ("{:02d}.pkl.gz".format(ii))) + } for ii in range(len(s.lidar.data))] + infos.extend(info) + del self.dataset._sequences[seq] + + return infos + + + def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'): + database_save_path = os.path.join(self.root_path, + 'gt_database' if split == 'train' else 'gt_database_{}'.format(split)) + db_info_save_path = os.path.join(self.root_path, + 'pandaset_dbinfos_{}.pkl'.format(split)) + + os.makedirs(database_save_path, exist_ok=True) + all_db_infos = {} + + with open(info_path, 'rb') as f: + infos = pickle.load(f) + + for k in range(len(infos)): + print('gt_database sample: %d/%d' % (k + 1, len(infos))) + info = infos[k] + sample_idx = info['frame_idx'] + pose = self._get_pose(info) + points = self._get_lidar_points(info, pose) + gt_boxes, names, _ = self._get_annotations(info, pose) + + num_obj = gt_boxes.shape[0] + + point_indices = roiaware_pool3d_utils.points_in_boxes_cpu( + torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes) + ).numpy() # (nboxes, npoints) + + for i in range(num_obj): + tmp_name = names[i].replace("/", "").replace(" ", "") + filename = '%s_%s_%d.bin' % (sample_idx, tmp_name, i) + filepath = os.path.join(database_save_path, filename) + gt_points = points[point_indices[i] > 0] + gt_points[:, :3] -= gt_boxes[i, :3] + with open(filepath, 'wb') as f: + gt_points.tofile(f) + + if (used_classes is None) or names[i] in used_classes: + db_path = os.path.relpath(filepath, self.root_path) # 
gt_database/xxxxx.bin + db_info = {'name': names[i], 'path': db_path, 'gt_idx': i, + 'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0], + 'difficulty': -1} + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + + def evaluation(self, det_annos, class_names, **kwargs): + self.logger.warning('Evaluation is not implemented for Pandaset as there is no official one. ' + + 'Returning an empty evaluation result.') + ap_result_str = '' + ap_dict = {} + + return ap_result_str, ap_dict + + +def create_pandaset_infos(dataset_cfg, class_names, data_path, save_path): + """ + Create dataset_infos files in order not to have it in a preprocessed pickle + file with the info for each sample + See PandasetDataset.get_infos for further details. + """ + dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) + for split in ["train", "val", "test"]: + print("---------------- Start to generate {} data infos ---------------".format(split)) + dataset.set_split(split) + infos = dataset.get_infos() + file_path = os.path.join(save_path, 'pandaset_infos_{}.pkl'.format(split)) + with open(file_path, 'wb') as f: + pickle.dump(infos, f) + print("Pandaset info {} file is saved to {}".format(split, file_path)) + + print('------------Start create groundtruth database for data augmentation-----------') + dataset = PandasetDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False) + dataset.set_split("train") + dataset.create_groundtruth_database( + os.path.join(save_path, 'pandaset_infos_train.pkl'), + split="train" + ) + print('---------------Data preparation Done---------------') + + +if __name__ == '__main__': + import sys + if sys.argv.__len__() > 1 and sys.argv[1] == 'create_pandaset_infos': + import yaml + from pathlib import Path + from easydict import EasyDict + dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2]))) + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + create_pandaset_infos( + dataset_cfg=dataset_cfg, + class_names=['Car', 'Pedestrian', 'Cyclist'], + data_path=ROOT_DIR / 'data' / 'pandaset', + save_path=ROOT_DIR / 'data' / 'pandaset' + ) + + + + diff --git a/toolbox/openpcdet/pcdet/datasets/processor/__init__.py b/toolbox/openpcdet/pcdet/datasets/processor/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/processor/data_processor.py b/toolbox/openpcdet/pcdet/datasets/processor/data_processor.py new file mode 100644 index 000000000..4f72ab532 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/processor/data_processor.py @@ -0,0 +1,298 @@ +from functools import partial + +import numpy as np +from skimage import transform +import torch +import torchvision +from ...utils import box_utils, common_utils + +tv = None +try: + import cumm.tensorview as tv +except: + pass + + +class VoxelGeneratorWrapper(): + def __init__(self, vsize_xyz, coors_range_xyz, num_point_features, max_num_points_per_voxel, max_num_voxels): + try: + from spconv.utils import VoxelGeneratorV2 as VoxelGenerator + self.spconv_ver = 1 + except: + try: + from spconv.utils import VoxelGenerator + self.spconv_ver = 1 + except: + from spconv.utils import Point2VoxelCPU3d as VoxelGenerator + self.spconv_ver = 2 + + if self.spconv_ver == 1: + 
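+            # spconv 1.x: VoxelGenerator takes voxel_size / point_cloud_range /
+            # max_num_points, while spconv 2.x (Point2VoxelCPU3d, handled in the
+            # else branch) renames the arguments and also needs num_point_features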
self._voxel_generator = VoxelGenerator( + voxel_size=vsize_xyz, + point_cloud_range=coors_range_xyz, + max_num_points=max_num_points_per_voxel, + max_voxels=max_num_voxels + ) + else: + self._voxel_generator = VoxelGenerator( + vsize_xyz=vsize_xyz, + coors_range_xyz=coors_range_xyz, + num_point_features=num_point_features, + max_num_points_per_voxel=max_num_points_per_voxel, + max_num_voxels=max_num_voxels + ) + + def generate(self, points): + if self.spconv_ver == 1: + voxel_output = self._voxel_generator.generate(points) + if isinstance(voxel_output, dict): + voxels, coordinates, num_points = \ + voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel'] + else: + voxels, coordinates, num_points = voxel_output + else: + assert tv is not None, f"Unexpected error, library: 'cumm' wasn't imported properly." + voxel_output = self._voxel_generator.point_to_voxel(tv.from_numpy(points)) + tv_voxels, tv_coordinates, tv_num_points = voxel_output + # make copy with numpy(), since numpy_view() will disappear as soon as the generator is deleted + voxels = tv_voxels.numpy() + coordinates = tv_coordinates.numpy() + num_points = tv_num_points.numpy() + return voxels, coordinates, num_points + + +class DataProcessor(object): + def __init__(self, processor_configs, point_cloud_range, training, num_point_features): + self.point_cloud_range = point_cloud_range + self.training = training + self.num_point_features = num_point_features + self.mode = 'train' if training else 'test' + self.grid_size = self.voxel_size = None + self.data_processor_queue = [] + + self.voxel_generator = None + + for cur_cfg in processor_configs: + cur_processor = getattr(self, cur_cfg.NAME)(config=cur_cfg) + self.data_processor_queue.append(cur_processor) + + def mask_points_and_boxes_outside_range(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.mask_points_and_boxes_outside_range, config=config) + + if data_dict.get('points', None) is not None: + mask = common_utils.mask_points_by_range(data_dict['points'], self.point_cloud_range) + data_dict['points'] = data_dict['points'][mask] + + if data_dict.get('gt_boxes', None) is not None and config.REMOVE_OUTSIDE_BOXES and self.training: + mask = box_utils.mask_boxes_outside_range_numpy( + data_dict['gt_boxes'], self.point_cloud_range, min_num_corners=config.get('min_num_corners', 1), + use_center_to_filter=config.get('USE_CENTER_TO_FILTER', True) + ) + data_dict['gt_boxes'] = data_dict['gt_boxes'][mask] + return data_dict + + def shuffle_points(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.shuffle_points, config=config) + + if config.SHUFFLE_ENABLED[self.mode]: + points = data_dict['points'] + shuffle_idx = np.random.permutation(points.shape[0]) + points = points[shuffle_idx] + data_dict['points'] = points + + return data_dict + + def transform_points_to_voxels_placeholder(self, data_dict=None, config=None): + # just calculate grid size + if data_dict is None: + grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE) + self.grid_size = np.round(grid_size).astype(np.int64) + self.voxel_size = config.VOXEL_SIZE + return partial(self.transform_points_to_voxels_placeholder, config=config) + + return data_dict + + def double_flip(self, points): + # y flip + points_yflip = points.copy() + points_yflip[:, 1] = -points_yflip[:, 1] + + # x flip + points_xflip = points.copy() + points_xflip[:, 0] = -points_xflip[:, 0] + + # x y flip + points_xyflip = 
points.copy() + points_xyflip[:, 0] = -points_xyflip[:, 0] + points_xyflip[:, 1] = -points_xyflip[:, 1] + + return points_yflip, points_xflip, points_xyflip + + def transform_points_to_voxels(self, data_dict=None, config=None): + if data_dict is None: + grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE) + self.grid_size = np.round(grid_size).astype(np.int64) + self.voxel_size = config.VOXEL_SIZE + # just bind the config, we will create the VoxelGeneratorWrapper later, + # to avoid pickling issues in multiprocess spawn + return partial(self.transform_points_to_voxels, config=config) + + if self.voxel_generator is None: + self.voxel_generator = VoxelGeneratorWrapper( + vsize_xyz=config.VOXEL_SIZE, + coors_range_xyz=self.point_cloud_range, + num_point_features=self.num_point_features, + max_num_points_per_voxel=config.MAX_POINTS_PER_VOXEL, + max_num_voxels=config.MAX_NUMBER_OF_VOXELS[self.mode], + ) + + points = data_dict['points'] + voxel_output = self.voxel_generator.generate(points) + voxels, coordinates, num_points = voxel_output + + if not data_dict['use_lead_xyz']: + voxels = voxels[..., 3:] # remove xyz in voxels(N, 3) + + if config.get('DOUBLE_FLIP', False): + voxels_list, voxel_coords_list, voxel_num_points_list = [voxels], [coordinates], [num_points] + points_yflip, points_xflip, points_xyflip = self.double_flip(points) + points_list = [points_yflip, points_xflip, points_xyflip] + keys = ['yflip', 'xflip', 'xyflip'] + for i, key in enumerate(keys): + voxel_output = self.voxel_generator.generate(points_list[i]) + voxels, coordinates, num_points = voxel_output + + if not data_dict['use_lead_xyz']: + voxels = voxels[..., 3:] + voxels_list.append(voxels) + voxel_coords_list.append(coordinates) + voxel_num_points_list.append(num_points) + + data_dict['voxels'] = voxels_list + data_dict['voxel_coords'] = voxel_coords_list + data_dict['voxel_num_points'] = voxel_num_points_list + else: + data_dict['voxels'] = voxels + data_dict['voxel_coords'] = coordinates + data_dict['voxel_num_points'] = num_points + return data_dict + + def sample_points(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.sample_points, config=config) + + num_points = config.NUM_POINTS[self.mode] + if num_points == -1: + return data_dict + + points = data_dict['points'] + if num_points < len(points): + pts_depth = np.linalg.norm(points[:, 0:3], axis=1) + pts_near_flag = pts_depth < 40.0 + far_idxs_choice = np.where(pts_near_flag == 0)[0] + near_idxs = np.where(pts_near_flag == 1)[0] + choice = [] + if num_points > len(far_idxs_choice): + near_idxs_choice = np.random.choice(near_idxs, num_points - len(far_idxs_choice), replace=False) + choice = np.concatenate((near_idxs_choice, far_idxs_choice), axis=0) \ + if len(far_idxs_choice) > 0 else near_idxs_choice + else: + choice = np.arange(0, len(points), dtype=np.int32) + choice = np.random.choice(choice, num_points, replace=False) + np.random.shuffle(choice) + else: + choice = np.arange(0, len(points), dtype=np.int32) + if num_points > len(points): + extra_choice = np.random.choice(choice, num_points - len(points), replace=False) + choice = np.concatenate((choice, extra_choice), axis=0) + np.random.shuffle(choice) + data_dict['points'] = points[choice] + return data_dict + + def calculate_grid_size(self, data_dict=None, config=None): + if data_dict is None: + grid_size = (self.point_cloud_range[3:6] - self.point_cloud_range[0:3]) / np.array(config.VOXEL_SIZE) + self.grid_size = 
np.round(grid_size).astype(np.int64) + self.voxel_size = config.VOXEL_SIZE + return partial(self.calculate_grid_size, config=config) + return data_dict + + def downsample_depth_map(self, data_dict=None, config=None): + if data_dict is None: + self.depth_downsample_factor = config.DOWNSAMPLE_FACTOR + return partial(self.downsample_depth_map, config=config) + + data_dict['depth_maps'] = transform.downscale_local_mean( + image=data_dict['depth_maps'], + factors=(self.depth_downsample_factor, self.depth_downsample_factor) + ) + return data_dict + + def image_normalize(self, data_dict=None, config=None): + if data_dict is None: + return partial(self.image_normalize, config=config) + mean = config.mean + std = config.std + compose = torchvision.transforms.Compose( + [ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize(mean=mean, std=std), + ] + ) + data_dict["camera_imgs"] = [compose(img) for img in data_dict["camera_imgs"]] + return data_dict + + def image_calibrate(self,data_dict=None, config=None): + if data_dict is None: + return partial(self.image_calibrate, config=config) + img_process_infos = data_dict['img_process_infos'] + transforms = [] + for img_process_info in img_process_infos: + resize, crop, flip, rotate = img_process_info + + rotation = torch.eye(2) + translation = torch.zeros(2) + # post-homography transformation + rotation *= resize + translation -= torch.Tensor(crop[:2]) + if flip: + A = torch.Tensor([[-1, 0], [0, 1]]) + b = torch.Tensor([crop[2] - crop[0], 0]) + rotation = A.matmul(rotation) + translation = A.matmul(translation) + b + theta = rotate / 180 * np.pi + A = torch.Tensor( + [ + [np.cos(theta), np.sin(theta)], + [-np.sin(theta), np.cos(theta)], + ] + ) + b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2 + b = A.matmul(-b) + b + rotation = A.matmul(rotation) + translation = A.matmul(translation) + b + transform = torch.eye(4) + transform[:2, :2] = rotation + transform[:2, 3] = translation + transforms.append(transform.numpy()) + data_dict["img_aug_matrix"] = transforms + return data_dict + + def forward(self, data_dict): + """ + Args: + data_dict: + points: (N, 3 + C_in) + gt_boxes: optional, (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + gt_names: optional, (N), string + ... + + Returns: + """ + + for cur_processor in self.data_processor_queue: + data_dict = cur_processor(data_dict=data_dict) + + return data_dict diff --git a/toolbox/openpcdet/pcdet/datasets/processor/point_feature_encoder.py b/toolbox/openpcdet/pcdet/datasets/processor/point_feature_encoder.py new file mode 100644 index 000000000..d22bce90f --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/processor/point_feature_encoder.py @@ -0,0 +1,57 @@ +import numpy as np + + +class PointFeatureEncoder(object): + def __init__(self, config, point_cloud_range=None): + super().__init__() + self.point_encoding_config = config + assert list(self.point_encoding_config.src_feature_list[0:3]) == ['x', 'y', 'z'] + self.used_feature_list = self.point_encoding_config.used_feature_list + self.src_feature_list = self.point_encoding_config.src_feature_list + self.point_cloud_range = point_cloud_range + + @property + def num_point_features(self): + return getattr(self, self.point_encoding_config.encoding_type)(points=None) + + def forward(self, data_dict): + """ + Args: + data_dict: + points: (N, 3 + C_in) + ... + Returns: + data_dict: + points: (N, 3 + C_out), + use_lead_xyz: whether to use xyz as point-wise features + ... 
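+                e.g. with src_feature_list=['x', 'y', 'z', 'intensity'] and
+                used_feature_list=['x', 'y', 'z', 'intensity'] (illustrative
+                values, not shipped defaults), absolute_coordinates_encoding
+                keeps the intensity column and returns points of shape (N, 4)
+                with use_lead_xyz=True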
+ """ + data_dict['points'], use_lead_xyz = getattr(self, self.point_encoding_config.encoding_type)( + data_dict['points'] + ) + data_dict['use_lead_xyz'] = use_lead_xyz + + if self.point_encoding_config.get('filter_sweeps', False) and 'timestamp' in self.src_feature_list: + max_sweeps = self.point_encoding_config.max_sweeps + idx = self.src_feature_list.index('timestamp') + dt = np.round(data_dict['points'][:, idx], 2) + max_dt = sorted(np.unique(dt))[min(len(np.unique(dt))-1, max_sweeps-1)] + data_dict['points'] = data_dict['points'][dt <= max_dt] + + return data_dict + + def absolute_coordinates_encoding(self, points=None): + if points is None: + num_output_features = len(self.used_feature_list) + return num_output_features + + assert points.shape[-1] == len(self.src_feature_list) + point_feature_list = [points[:, 0:3]] + for x in self.used_feature_list: + if x in ['x', 'y', 'z']: + continue + idx = self.src_feature_list.index(x) + point_feature_list.append(points[:, idx:idx+1]) + point_features = np.concatenate(point_feature_list, axis=1) + + return point_features, True diff --git a/toolbox/openpcdet/pcdet/datasets/waymo/__init__.py b/toolbox/openpcdet/pcdet/datasets/waymo/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/datasets/waymo/waymo_dataset.py b/toolbox/openpcdet/pcdet/datasets/waymo/waymo_dataset.py new file mode 100644 index 000000000..44632d53f --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/waymo/waymo_dataset.py @@ -0,0 +1,827 @@ +# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset +# Reference https://github.com/open-mmlab/OpenPCDet +# Written by Shaoshuai Shi, Chaoxu Guo +# All Rights Reserved. + +import os +import pickle +import copy +import numpy as np +import torch +import multiprocessing +import SharedArray +import torch.distributed as dist +from tqdm import tqdm +from pathlib import Path +from functools import partial + +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import box_utils, common_utils +from ..dataset import DatasetTemplate + + +class WaymoDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None): + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.data_path = self.root_path / self.dataset_cfg.PROCESSED_DATA_TAG + self.split = self.dataset_cfg.DATA_SPLIT[self.mode] + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] + + self.infos = [] + self.seq_name_to_infos = self.include_waymo_data(self.mode) + + self.use_shared_memory = self.dataset_cfg.get('USE_SHARED_MEMORY', False) and self.training + if self.use_shared_memory: + self.shared_memory_file_limit = self.dataset_cfg.get('SHARED_MEMORY_FILE_LIMIT', 0x7FFFFFFF) + self.load_data_to_shared_memory() + + if self.dataset_cfg.get('USE_PREDBOX', False): + self.pred_boxes_dict = self.load_pred_boxes_to_dict( + pred_boxes_path=self.dataset_cfg.ROI_BOXES_PATH[self.mode] + ) + else: + self.pred_boxes_dict = {} + + def set_split(self, split): + super().__init__( + dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training, + root_path=self.root_path, logger=self.logger + ) + self.split = split + split_dir = self.root_path / 'ImageSets' / (self.split + '.txt') + self.sample_sequence_list = [x.strip() for x in open(split_dir).readlines()] + self.infos = [] + 
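+        # include_waymo_data() both extends self.infos and returns the
+        # per-sequence info index that get_sequence_data() needs when
+        # SEQUENCE_CONFIG is enabled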
self.seq_name_to_infos = self.include_waymo_data(self.mode) + + def include_waymo_data(self, mode): + self.logger.info('Loading Waymo dataset') + waymo_infos = [] + seq_name_to_infos = {} + + num_skipped_infos = 0 + for k in range(len(self.sample_sequence_list)): + sequence_name = os.path.splitext(self.sample_sequence_list[k])[0] + info_path = self.data_path / sequence_name / ('%s.pkl' % sequence_name) + info_path = self.check_sequence_name_with_all_version(info_path) + if not info_path.exists(): + num_skipped_infos += 1 + continue + with open(info_path, 'rb') as f: + infos = pickle.load(f) + waymo_infos.extend(infos) + + seq_name_to_infos[infos[0]['point_cloud']['lidar_sequence']] = infos + + self.infos.extend(waymo_infos[:]) + self.logger.info('Total skipped info %s' % num_skipped_infos) + self.logger.info('Total samples for Waymo dataset: %d' % (len(waymo_infos))) + + if self.dataset_cfg.SAMPLED_INTERVAL[mode] > 1: + sampled_waymo_infos = [] + for k in range(0, len(self.infos), self.dataset_cfg.SAMPLED_INTERVAL[mode]): + sampled_waymo_infos.append(self.infos[k]) + self.infos = sampled_waymo_infos + self.logger.info('Total sampled samples for Waymo dataset: %d' % len(self.infos)) + + use_sequence_data = self.dataset_cfg.get('SEQUENCE_CONFIG', None) is not None and self.dataset_cfg.SEQUENCE_CONFIG.ENABLED + if not use_sequence_data: + seq_name_to_infos = None + return seq_name_to_infos + + def load_pred_boxes_to_dict(self, pred_boxes_path): + self.logger.info(f'Loading and reorganizing pred_boxes to dict from path: {pred_boxes_path}') + with open(pred_boxes_path, 'rb') as f: + pred_dicts = pickle.load(f) + + pred_boxes_dict = {} + for index, box_dict in enumerate(pred_dicts): + seq_name = box_dict['frame_id'][:-4].replace('training_', '').replace('validation_', '') + sample_idx = int(box_dict['frame_id'][-3:]) + + if seq_name not in pred_boxes_dict: + pred_boxes_dict[seq_name] = {} + + pred_labels = np.array([self.class_names.index(box_dict['name'][k]) + 1 for k in range(box_dict['name'].shape[0])]) + pred_boxes = np.concatenate((box_dict['boxes_lidar'], box_dict['score'][:, np.newaxis], pred_labels[:, np.newaxis]), axis=-1) + pred_boxes_dict[seq_name][sample_idx] = pred_boxes + + self.logger.info(f'Predicted boxes has been loaded, total sequences: {len(pred_boxes_dict)}') + return pred_boxes_dict + + def load_data_to_shared_memory(self): + self.logger.info(f'Loading training data to shared memory (file limit={self.shared_memory_file_limit})') + + cur_rank, num_gpus = common_utils.get_dist_info() + all_infos = self.infos[:self.shared_memory_file_limit] \ + if self.shared_memory_file_limit < len(self.infos) else self.infos + cur_infos = all_infos[cur_rank::num_gpus] + for info in cur_infos: + pc_info = info['point_cloud'] + sequence_name = pc_info['lidar_sequence'] + sample_idx = pc_info['sample_idx'] + + sa_key = f'{sequence_name}___{sample_idx}' + if os.path.exists(f"/dev/shm/{sa_key}"): + continue + + points = self.get_lidar(sequence_name, sample_idx) + common_utils.sa_create(f"shm://{sa_key}", points) + + dist.barrier() + self.logger.info('Training data has been saved to shared memory') + + def clean_shared_memory(self): + self.logger.info(f'Clean training data from shared memory (file limit={self.shared_memory_file_limit})') + + cur_rank, num_gpus = common_utils.get_dist_info() + all_infos = self.infos[:self.shared_memory_file_limit] \ + if self.shared_memory_file_limit < len(self.infos) else self.infos + cur_infos = all_infos[cur_rank::num_gpus] + for info in cur_infos: + pc_info = 
info['point_cloud'] + sequence_name = pc_info['lidar_sequence'] + sample_idx = pc_info['sample_idx'] + + sa_key = f'{sequence_name}___{sample_idx}' + if not os.path.exists(f"/dev/shm/{sa_key}"): + continue + + SharedArray.delete(f"shm://{sa_key}") + + if num_gpus > 1: + dist.barrier() + self.logger.info('Training data has been deleted from shared memory') + + @staticmethod + def check_sequence_name_with_all_version(sequence_file): + if not sequence_file.exists(): + found_sequence_file = sequence_file + for pre_text in ['training', 'validation', 'testing']: + if not sequence_file.exists(): + temp_sequence_file = Path(str(sequence_file).replace('segment', pre_text + '_segment')) + if temp_sequence_file.exists(): + found_sequence_file = temp_sequence_file + break + if not found_sequence_file.exists(): + found_sequence_file = Path(str(sequence_file).replace('_with_camera_labels', '')) + if found_sequence_file.exists(): + sequence_file = found_sequence_file + return sequence_file + + def get_infos(self, raw_data_path, save_path, num_workers=multiprocessing.cpu_count(), has_label=True, sampled_interval=1, update_info_only=False): + from . import waymo_utils + print('---------------The waymo sample interval is %d, total sequecnes is %d-----------------' + % (sampled_interval, len(self.sample_sequence_list))) + + process_single_sequence = partial( + waymo_utils.process_single_sequence, + save_path=save_path, sampled_interval=sampled_interval, has_label=has_label, update_info_only=update_info_only + ) + sample_sequence_file_list = [ + self.check_sequence_name_with_all_version(raw_data_path / sequence_file) + for sequence_file in self.sample_sequence_list + ] + + # process_single_sequence(sample_sequence_file_list[0]) + with multiprocessing.Pool(num_workers) as p: + sequence_infos = list(tqdm(p.imap(process_single_sequence, sample_sequence_file_list), + total=len(sample_sequence_file_list))) + + all_sequences_infos = [item for infos in sequence_infos for item in infos] + return all_sequences_infos + + def get_lidar(self, sequence_name, sample_idx): + lidar_file = self.data_path / sequence_name / ('%04d.npy' % sample_idx) + point_features = np.load(lidar_file) # (N, 7): [x, y, z, intensity, elongation, NLZ_flag] + + points_all, NLZ_flag = point_features[:, 0:5], point_features[:, 5] + if not self.dataset_cfg.get('DISABLE_NLZ_FLAG_ON_POINTS', False): + points_all = points_all[NLZ_flag == -1] + if self.dataset_cfg.get('POINTS_TANH_DIM', None) is None: + points_all[:, 3] = np.tanh(points_all[:, 3]) + else: + for dim_idx in self.dataset_cfg.POINTS_TANH_DIM: + points_all[:, dim_idx] = np.tanh(points_all[:, dim_idx]) + return points_all + + @staticmethod + def transform_prebox_to_current(pred_boxes3d, pose_pre, pose_cur): + """ + + Args: + pred_boxes3d (N, 9 or 11): [x, y, z, dx, dy, dz, raw, score, label] + pose_pre (4, 4): + pose_cur (4, 4): + Returns: + + """ + assert pred_boxes3d.shape[-1] in [9, 11] + pred_boxes3d = pred_boxes3d.copy() + expand_bboxes = np.concatenate([pred_boxes3d[:, :3], np.ones((pred_boxes3d.shape[0], 1))], axis=-1) + + bboxes_global = np.dot(expand_bboxes, pose_pre.T)[:, :3] + expand_bboxes_global = np.concatenate([bboxes_global[:, :3],np.ones((bboxes_global.shape[0], 1))], axis=-1) + bboxes_pre2cur = np.dot(expand_bboxes_global, np.linalg.inv(pose_cur.T))[:, :3] + pred_boxes3d[:, 0:3] = bboxes_pre2cur + + if pred_boxes3d.shape[-1] == 11: + expand_vels = np.concatenate([pred_boxes3d[:, 7:9], np.zeros((pred_boxes3d.shape[0], 1))], axis=-1) + vels_global = np.dot(expand_vels, 
pose_pre[:3, :3].T)
+            vels_pre2cur = np.dot(vels_global, np.linalg.inv(pose_cur[:3, :3].T))[:, :2]
+            pred_boxes3d[:, 7:9] = vels_pre2cur
+
+        pred_boxes3d[:, 6] = pred_boxes3d[..., 6] + np.arctan2(pose_pre[..., 1, 0], pose_pre[..., 0, 0])
+        pred_boxes3d[:, 6] = pred_boxes3d[..., 6] - np.arctan2(pose_cur[..., 1, 0], pose_cur[..., 0, 0])
+        return pred_boxes3d
+
+    @staticmethod
+    def reorder_rois_for_refining(pred_bboxes):
+        num_max_rois = max([len(bbox) for bbox in pred_bboxes])
+        num_max_rois = max(1, num_max_rois)  # at least one fake roi to avoid errors
+        ordered_bboxes = np.zeros([len(pred_bboxes), num_max_rois, pred_bboxes[0].shape[-1]], dtype=np.float32)
+
+        for bs_idx in range(ordered_bboxes.shape[0]):
+            ordered_bboxes[bs_idx, :len(pred_bboxes[bs_idx])] = pred_bboxes[bs_idx]
+        return ordered_bboxes
+
+    def get_sequence_data(self, info, points, sequence_name, sample_idx, sequence_cfg, load_pred_boxes=False):
+        """
+        Args:
+            info:
+            points:
+            sequence_name:
+            sample_idx:
+            sequence_cfg:
+        Returns:
+        """
+
+        def remove_ego_points(points, center_radius=1.0):
+            mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
+            return points[mask]
+
+        def load_pred_boxes_from_dict(sequence_name, sample_idx):
+            """
+            boxes: (N, 11) [x, y, z, dx, dy, dz, yaw, vx, vy, score, label]
+            """
+            sequence_name = sequence_name.replace('training_', '').replace('validation_', '')
+            load_boxes = self.pred_boxes_dict[sequence_name][sample_idx]
+            assert load_boxes.shape[-1] == 11
+            load_boxes[:, 7:9] = -0.1 * load_boxes[:, 7:9]  # transfer speed to negative motion from t to t-1
+            return load_boxes
+
+        pose_cur = info['pose'].reshape((4, 4))
+        num_pts_cur = points.shape[0]
+        sample_idx_pre_list = np.clip(sample_idx + np.arange(sequence_cfg.SAMPLE_OFFSET[0], sequence_cfg.SAMPLE_OFFSET[1]), 0, 0x7FFFFFFF)
+        sample_idx_pre_list = sample_idx_pre_list[::-1]
+
+        if sequence_cfg.get('ONEHOT_TIMESTAMP', False):
+            onehot_cur = np.zeros((points.shape[0], len(sample_idx_pre_list) + 1)).astype(points.dtype)
+            onehot_cur[:, 0] = 1
+            points = np.hstack([points, onehot_cur])
+        else:
+            points = np.hstack([points, np.zeros((points.shape[0], 1)).astype(points.dtype)])
+        points_pre_all = []
+        num_points_pre = []
+
+        pose_all = [pose_cur]
+        pred_boxes_all = []
+        if load_pred_boxes:
+            pred_boxes = load_pred_boxes_from_dict(sequence_name, sample_idx)
+            pred_boxes_all.append(pred_boxes)
+
+        sequence_info = self.seq_name_to_infos[sequence_name]
+
+        for idx, sample_idx_pre in enumerate(sample_idx_pre_list):
+
+            points_pre = self.get_lidar(sequence_name, sample_idx_pre)
+            pose_pre = sequence_info[sample_idx_pre]['pose'].reshape((4, 4))
+            expand_points_pre = np.concatenate([points_pre[:, :3], np.ones((points_pre.shape[0], 1))], axis=-1)
+            points_pre_global = np.dot(expand_points_pre, pose_pre.T)[:, :3]
+            expand_points_pre_global = np.concatenate([points_pre_global, np.ones((points_pre_global.shape[0], 1))], axis=-1)
+            points_pre2cur = np.dot(expand_points_pre_global, np.linalg.inv(pose_cur.T))[:, :3]
+            points_pre = np.concatenate([points_pre2cur, points_pre[:, 3:]], axis=-1)
+            if sequence_cfg.get('ONEHOT_TIMESTAMP', False):
+                onehot_vector = np.zeros((points_pre.shape[0], len(sample_idx_pre_list) + 1))
+                onehot_vector[:, idx + 1] = 1
+                points_pre = np.hstack([points_pre, onehot_vector])
+            else:
+                # add timestamp
+                points_pre = np.hstack([points_pre, 0.1 * (sample_idx - sample_idx_pre) * np.ones((points_pre.shape[0], 1)).astype(points_pre.dtype)])  # one frame 0.1s
+            points_pre = remove_ego_points(points_pre, 1.0)
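+            # each past sweep now carries either a one-hot frame tag or a
+            # relative timestamp channel (0.1 s per frame), so the network can
+            # tell the accumulated sweeps apart after they are concatenated below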
+ points_pre_all.append(points_pre) + num_points_pre.append(points_pre.shape[0]) + pose_all.append(pose_pre) + + if load_pred_boxes: + pose_pre = sequence_info[sample_idx_pre]['pose'].reshape((4, 4)) + pred_boxes = load_pred_boxes_from_dict(sequence_name, sample_idx_pre) + pred_boxes = self.transform_prebox_to_current(pred_boxes, pose_pre, pose_cur) + pred_boxes_all.append(pred_boxes) + + points = np.concatenate([points] + points_pre_all, axis=0).astype(np.float32) + num_points_all = np.array([num_pts_cur] + num_points_pre).astype(np.int32) + poses = np.concatenate(pose_all, axis=0).astype(np.float32) + + if load_pred_boxes: + temp_pred_boxes = self.reorder_rois_for_refining(pred_boxes_all) + pred_boxes = temp_pred_boxes[:, :, 0:9] + pred_scores = temp_pred_boxes[:, :, 9] + pred_labels = temp_pred_boxes[:, :, 10] + else: + pred_boxes = pred_scores = pred_labels = None + + return points, num_points_all, sample_idx_pre_list, poses, pred_boxes, pred_scores, pred_labels + + def __len__(self): + if self._merge_all_iters_to_one_epoch: + return len(self.infos) * self.total_epochs + + return len(self.infos) + + def __getitem__(self, index): + if self._merge_all_iters_to_one_epoch: + index = index % len(self.infos) + + info = copy.deepcopy(self.infos[index]) + pc_info = info['point_cloud'] + sequence_name = pc_info['lidar_sequence'] + sample_idx = pc_info['sample_idx'] + input_dict = { + 'sample_idx': sample_idx + } + if self.use_shared_memory and index < self.shared_memory_file_limit: + sa_key = f'{sequence_name}___{sample_idx}' + points = SharedArray.attach(f"shm://{sa_key}").copy() + else: + points = self.get_lidar(sequence_name, sample_idx) + + if self.dataset_cfg.get('SEQUENCE_CONFIG', None) is not None and self.dataset_cfg.SEQUENCE_CONFIG.ENABLED: + points, num_points_all, sample_idx_pre_list, poses, pred_boxes, pred_scores, pred_labels = self.get_sequence_data( + info, points, sequence_name, sample_idx, self.dataset_cfg.SEQUENCE_CONFIG, + load_pred_boxes=self.dataset_cfg.get('USE_PREDBOX', False) + ) + input_dict['poses'] = poses + if self.dataset_cfg.get('USE_PREDBOX', False): + input_dict.update({ + 'roi_boxes': pred_boxes, + 'roi_scores': pred_scores, + 'roi_labels': pred_labels, + }) + + input_dict.update({ + 'points': points, + 'frame_id': info['frame_id'], + }) + + if 'annos' in info: + annos = info['annos'] + annos = common_utils.drop_info_with_name(annos, name='unknown') + + if self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False): + gt_boxes_lidar = box_utils.boxes3d_kitti_fakelidar_to_lidar(annos['gt_boxes_lidar']) + else: + gt_boxes_lidar = annos['gt_boxes_lidar'] + + if self.dataset_cfg.get('TRAIN_WITH_SPEED', False): + assert gt_boxes_lidar.shape[-1] == 9 + else: + gt_boxes_lidar = gt_boxes_lidar[:, 0:7] + + if self.training and self.dataset_cfg.get('FILTER_EMPTY_BOXES_FOR_TRAIN', False): + mask = (annos['num_points_in_gt'] > 0) # filter empty boxes + annos['name'] = annos['name'][mask] + gt_boxes_lidar = gt_boxes_lidar[mask] + annos['num_points_in_gt'] = annos['num_points_in_gt'][mask] + + input_dict.update({ + 'gt_names': annos['name'], + 'gt_boxes': gt_boxes_lidar, + 'num_points_in_gt': annos.get('num_points_in_gt', None) + }) + + data_dict = self.prepare_data(data_dict=input_dict) + data_dict['metadata'] = info.get('metadata', info['frame_id']) + data_dict.pop('num_points_in_gt', None) + return data_dict + + def evaluation(self, det_annos, class_names, **kwargs): + if 'annos' not in self.infos[0].keys(): + return 'No ground-truth boxes for evaluation', {} + + def 
kitti_eval(eval_det_annos, eval_gt_annos): + from ..kitti.kitti_object_eval_python import eval as kitti_eval + from ..kitti import kitti_utils + + map_name_to_kitti = { + 'Vehicle': 'Car', + 'Pedestrian': 'Pedestrian', + 'Cyclist': 'Cyclist', + 'Sign': 'Sign', + 'Car': 'Car' + } + kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti) + kitti_utils.transform_annotations_to_kitti_format( + eval_gt_annos, map_name_to_kitti=map_name_to_kitti, + info_with_fakelidar=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False) + ) + kitti_class_names = [map_name_to_kitti[x] for x in class_names] + ap_result_str, ap_dict = kitti_eval.get_official_eval_result( + gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names + ) + return ap_result_str, ap_dict + + def waymo_eval(eval_det_annos, eval_gt_annos): + from .waymo_eval import OpenPCDetWaymoDetectionMetricsEstimator + eval = OpenPCDetWaymoDetectionMetricsEstimator() + + ap_dict = eval.waymo_evaluation( + eval_det_annos, eval_gt_annos, class_name=class_names, + distance_thresh=1000, fake_gt_infos=self.dataset_cfg.get('INFO_WITH_FAKELIDAR', False) + ) + ap_result_str = '\n' + for key in ap_dict: + ap_dict[key] = ap_dict[key][0] + ap_result_str += '%s: %.4f \n' % (key, ap_dict[key]) + + return ap_result_str, ap_dict + + eval_det_annos = copy.deepcopy(det_annos) + eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos] + + if kwargs['eval_metric'] == 'kitti': + ap_result_str, ap_dict = kitti_eval(eval_det_annos, eval_gt_annos) + elif kwargs['eval_metric'] == 'waymo': + ap_result_str, ap_dict = waymo_eval(eval_det_annos, eval_gt_annos) + else: + raise NotImplementedError + + return ap_result_str, ap_dict + + def create_groundtruth_database(self, info_path, save_path, used_classes=None, split='train', sampled_interval=10, + processed_data_tag=None): + + use_sequence_data = self.dataset_cfg.get('SEQUENCE_CONFIG', None) is not None and self.dataset_cfg.SEQUENCE_CONFIG.ENABLED + + if use_sequence_data: + st_frame, ed_frame = self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[0], self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[1] + self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[0] = min(-4, st_frame) # at least we use 5 frames for generating gt database to support various sequence configs (<= 5 frames) + st_frame = self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[0] + database_save_path = save_path / ('%s_gt_database_%s_sampled_%d_multiframe_%s_to_%s' % (processed_data_tag, split, sampled_interval, st_frame, ed_frame)) + db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d_multiframe_%s_to_%s.pkl' % (processed_data_tag, split, sampled_interval, st_frame, ed_frame)) + db_data_save_path = save_path / ('%s_gt_database_%s_sampled_%d_multiframe_%s_to_%s_global.npy' % (processed_data_tag, split, sampled_interval, st_frame, ed_frame)) + else: + database_save_path = save_path / ('%s_gt_database_%s_sampled_%d' % (processed_data_tag, split, sampled_interval)) + db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d.pkl' % (processed_data_tag, split, sampled_interval)) + db_data_save_path = save_path / ('%s_gt_database_%s_sampled_%d_global.npy' % (processed_data_tag, split, sampled_interval)) + + database_save_path.mkdir(parents=True, exist_ok=True) + all_db_infos = {} + with open(info_path, 'rb') as f: + infos = pickle.load(f) + + point_offset_cnt = 0 + stacked_gt_points = [] + for k in tqdm(range(0, len(infos), sampled_interval)): + # print('gt_database sample: %d/%d' % (k 
+ 1, len(infos))) + info = infos[k] + + pc_info = info['point_cloud'] + sequence_name = pc_info['lidar_sequence'] + sample_idx = pc_info['sample_idx'] + points = self.get_lidar(sequence_name, sample_idx) + + if use_sequence_data: + points, num_points_all, sample_idx_pre_list, _, _, _, _ = self.get_sequence_data( + info, points, sequence_name, sample_idx, self.dataset_cfg.SEQUENCE_CONFIG + ) + + annos = info['annos'] + names = annos['name'] + difficulty = annos['difficulty'] + gt_boxes = annos['gt_boxes_lidar'] + + if k % 4 != 0 and len(names) > 0: + mask = (names == 'Vehicle') + names = names[~mask] + difficulty = difficulty[~mask] + gt_boxes = gt_boxes[~mask] + + if k % 2 != 0 and len(names) > 0: + mask = (names == 'Pedestrian') + names = names[~mask] + difficulty = difficulty[~mask] + gt_boxes = gt_boxes[~mask] + + num_obj = gt_boxes.shape[0] + if num_obj == 0: + continue + + box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu( + torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), + torch.from_numpy(gt_boxes[:, 0:7]).unsqueeze(dim=0).float().cuda() + ).long().squeeze(dim=0).cpu().numpy() + + for i in range(num_obj): + filename = '%s_%04d_%s_%d.bin' % (sequence_name, sample_idx, names[i], i) + filepath = database_save_path / filename + gt_points = points[box_idxs_of_pts == i] + gt_points[:, :3] -= gt_boxes[i, :3] + + if (used_classes is None) or names[i] in used_classes: + gt_points = gt_points.astype(np.float32) + assert gt_points.dtype == np.float32 + with open(filepath, 'w') as f: + gt_points.tofile(f) + + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': names[i], 'path': db_path, 'sequence_name': sequence_name, + 'sample_idx': sample_idx, 'gt_idx': i, 'box3d_lidar': gt_boxes[i], + 'num_points_in_gt': gt_points.shape[0], 'difficulty': difficulty[i]} + + # it will be used if you choose to use shared memory for gt sampling + stacked_gt_points.append(gt_points) + db_info['global_data_offset'] = [point_offset_cnt, point_offset_cnt + gt_points.shape[0]] + point_offset_cnt += gt_points.shape[0] + + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + + # it will be used if you choose to use shared memory for gt sampling + stacked_gt_points = np.concatenate(stacked_gt_points, axis=0) + np.save(db_data_save_path, stacked_gt_points) + + def create_gt_database_of_single_scene(self, info_with_idx, database_save_path=None, use_sequence_data=False, used_classes=None, + total_samples=0, use_cuda=False, crop_gt_with_tail=False): + info, info_idx = info_with_idx + print('gt_database sample: %d/%d' % (info_idx, total_samples)) + + all_db_infos = {} + pc_info = info['point_cloud'] + sequence_name = pc_info['lidar_sequence'] + sample_idx = pc_info['sample_idx'] + points = self.get_lidar(sequence_name, sample_idx) + + if use_sequence_data: + points, num_points_all, sample_idx_pre_list, _, _, _, _ = self.get_sequence_data( + info, points, sequence_name, sample_idx, self.dataset_cfg.SEQUENCE_CONFIG + ) + + annos = info['annos'] + names = annos['name'] + difficulty = annos['difficulty'] + gt_boxes = annos['gt_boxes_lidar'] + + if info_idx % 4 != 0 and len(names) > 0: + mask = (names == 'Vehicle') + names = names[~mask] + difficulty = difficulty[~mask] + gt_boxes = gt_boxes[~mask] + + if info_idx % 2 != 0 and len(names) > 0: + 
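+            # as with the Vehicle mask above, Pedestrian boxes are kept on only
+            # every 2nd frame (Vehicle: every 4th), presumably to limit the size
+            # and class imbalance of the ground-truth sampling database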
mask = (names == 'Pedestrian') + names = names[~mask] + difficulty = difficulty[~mask] + gt_boxes = gt_boxes[~mask] + + num_obj = gt_boxes.shape[0] + if num_obj == 0: + return {} + + if use_sequence_data and crop_gt_with_tail: + assert gt_boxes.shape[1] == 9 + speed = gt_boxes[:, 7:9] + sequence_cfg = self.dataset_cfg.SEQUENCE_CONFIG + assert sequence_cfg.SAMPLE_OFFSET[1] == 0 + assert sequence_cfg.SAMPLE_OFFSET[0] < 0 + num_frames = sequence_cfg.SAMPLE_OFFSET[1] - sequence_cfg.SAMPLE_OFFSET[0] + 1 + assert num_frames > 1 + latest_center = gt_boxes[:, 0:2] + oldest_center = latest_center - speed * (num_frames - 1) * 0.1 + new_center = (latest_center + oldest_center) * 0.5 + new_length = gt_boxes[:, 3] + np.linalg.norm(latest_center - oldest_center, axis=-1) + gt_boxes_crop = gt_boxes.copy() + gt_boxes_crop[:, 0:2] = new_center + gt_boxes_crop[:, 3] = new_length + + else: + gt_boxes_crop = gt_boxes + + if use_cuda: + box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu( + torch.from_numpy(points[:, 0:3]).unsqueeze(dim=0).float().cuda(), + torch.from_numpy(gt_boxes_crop[:, 0:7]).unsqueeze(dim=0).float().cuda() + ).long().squeeze(dim=0).cpu().numpy() + else: + box_point_mask = roiaware_pool3d_utils.points_in_boxes_cpu( + torch.from_numpy(points[:, 0:3]).float(), + torch.from_numpy(gt_boxes_crop[:, 0:7]).float() + ).long().numpy() # (num_boxes, num_points) + + for i in range(num_obj): + filename = '%s_%04d_%s_%d.bin' % (sequence_name, sample_idx, names[i], i) + filepath = database_save_path / filename + if use_cuda: + gt_points = points[box_idxs_of_pts == i] + else: + gt_points = points[box_point_mask[i] > 0] + + gt_points[:, :3] -= gt_boxes[i, :3] + + if (used_classes is None) or names[i] in used_classes: + gt_points = gt_points.astype(np.float32) + assert gt_points.dtype == np.float32 + with open(filepath, 'w') as f: + gt_points.tofile(f) + + db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin + db_info = {'name': names[i], 'path': db_path, 'sequence_name': sequence_name, + 'sample_idx': sample_idx, 'gt_idx': i, 'box3d_lidar': gt_boxes[i], + 'num_points_in_gt': gt_points.shape[0], 'difficulty': difficulty[i], + 'box3d_crop': gt_boxes_crop[i]} + + if names[i] in all_db_infos: + all_db_infos[names[i]].append(db_info) + else: + all_db_infos[names[i]] = [db_info] + return all_db_infos + + def create_groundtruth_database_parallel(self, info_path, save_path, used_classes=None, split='train', sampled_interval=10, + processed_data_tag=None, num_workers=16, crop_gt_with_tail=False): + use_sequence_data = self.dataset_cfg.get('SEQUENCE_CONFIG', None) is not None and self.dataset_cfg.SEQUENCE_CONFIG.ENABLED + if use_sequence_data: + st_frame, ed_frame = self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[0], self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[1] + self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[0] = min(-4, st_frame) # at least we use 5 frames for generating gt database to support various sequence configs (<= 5 frames) + st_frame = self.dataset_cfg.SEQUENCE_CONFIG.SAMPLE_OFFSET[0] + database_save_path = save_path / ('%s_gt_database_%s_sampled_%d_multiframe_%s_to_%s_%sparallel' % (processed_data_tag, split, sampled_interval, st_frame, ed_frame, 'tail_' if crop_gt_with_tail else '')) + db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d_multiframe_%s_to_%s_%sparallel.pkl' % (processed_data_tag, split, sampled_interval, st_frame, ed_frame, 'tail_' if crop_gt_with_tail else '')) + else: + database_save_path = save_path / 
('%s_gt_database_%s_sampled_%d_parallel' % (processed_data_tag, split, sampled_interval)) + db_info_save_path = save_path / ('%s_waymo_dbinfos_%s_sampled_%d_parallel.pkl' % (processed_data_tag, split, sampled_interval)) + + database_save_path.mkdir(parents=True, exist_ok=True) + + with open(info_path, 'rb') as f: + infos = pickle.load(f) + + print(f'Number workers: {num_workers}') + create_gt_database_of_single_scene = partial( + self.create_gt_database_of_single_scene, + use_sequence_data=use_sequence_data, database_save_path=database_save_path, + used_classes=used_classes, total_samples=len(infos), use_cuda=False, + crop_gt_with_tail=crop_gt_with_tail + ) + # create_gt_database_of_single_scene((infos[300], 0)) + with multiprocessing.Pool(num_workers) as p: + all_db_infos_list = list(p.map(create_gt_database_of_single_scene, zip(infos, np.arange(len(infos))))) + + all_db_infos = {} + + for cur_db_infos in all_db_infos_list: + for key, val in cur_db_infos.items(): + if key not in all_db_infos: + all_db_infos[key] = val + else: + all_db_infos[key].extend(val) + + for k, v in all_db_infos.items(): + print('Database %s: %d' % (k, len(v))) + + with open(db_info_save_path, 'wb') as f: + pickle.dump(all_db_infos, f) + +def create_waymo_infos(dataset_cfg, class_names, data_path, save_path, + raw_data_tag='raw_data', processed_data_tag='waymo_processed_data', + workers=min(16, multiprocessing.cpu_count()), update_info_only=False): + dataset = WaymoDataset( + dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, + training=False, logger=common_utils.create_logger() + ) + train_split, val_split = 'train', 'val' + + train_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, train_split)) + val_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, val_split)) + + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + print('---------------Start to generate data infos---------------') + + dataset.set_split(train_split) + waymo_infos_train = dataset.get_infos( + raw_data_path=data_path / raw_data_tag, + save_path=save_path / processed_data_tag, num_workers=workers, has_label=True, + sampled_interval=1, update_info_only=update_info_only + ) + with open(train_filename, 'wb') as f: + pickle.dump(waymo_infos_train, f) + print('----------------Waymo info train file is saved to %s----------------' % train_filename) + + dataset.set_split(val_split) + waymo_infos_val = dataset.get_infos( + raw_data_path=data_path / raw_data_tag, + save_path=save_path / processed_data_tag, num_workers=workers, has_label=True, + sampled_interval=1, update_info_only=update_info_only + ) + with open(val_filename, 'wb') as f: + pickle.dump(waymo_infos_val, f) + print('----------------Waymo info val file is saved to %s----------------' % val_filename) + + if update_info_only: + return + + print('---------------Start create groundtruth database for data augmentation---------------') + os.environ["CUDA_VISIBLE_DEVICES"] = "0" + dataset.set_split(train_split) + dataset.create_groundtruth_database( + info_path=train_filename, save_path=save_path, split='train', sampled_interval=1, + used_classes=['Vehicle', 'Pedestrian', 'Cyclist'], processed_data_tag=processed_data_tag + ) + print('---------------Data preparation Done---------------') + + +def create_waymo_gt_database( + dataset_cfg, class_names, data_path, save_path, processed_data_tag='waymo_processed_data', + workers=min(16, multiprocessing.cpu_count()), use_parallel=False, crop_gt_with_tail=False): + dataset = WaymoDataset( + dataset_cfg=dataset_cfg, 
class_names=class_names, root_path=data_path, + training=False, logger=common_utils.create_logger() + ) + train_split = 'train' + train_filename = save_path / ('%s_infos_%s.pkl' % (processed_data_tag, train_split)) + + print('---------------Start create groundtruth database for data augmentation---------------') + dataset.set_split(train_split) + + if use_parallel: + dataset.create_groundtruth_database_parallel( + info_path=train_filename, save_path=save_path, split='train', sampled_interval=1, + used_classes=['Vehicle', 'Pedestrian', 'Cyclist'], processed_data_tag=processed_data_tag, + num_workers=workers, crop_gt_with_tail=crop_gt_with_tail + ) + else: + dataset.create_groundtruth_database( + info_path=train_filename, save_path=save_path, split='train', sampled_interval=1, + used_classes=['Vehicle', 'Pedestrian', 'Cyclist'], processed_data_tag=processed_data_tag + ) + print('---------------Data preparation Done---------------') + + +if __name__ == '__main__': + import argparse + import yaml + from easydict import EasyDict + + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default=None, help='specify the config of dataset') + parser.add_argument('--func', type=str, default='create_waymo_infos', help='') + parser.add_argument('--processed_data_tag', type=str, default='waymo_processed_data_v0_5_0', help='') + parser.add_argument('--update_info_only', action='store_true', default=False, help='') + parser.add_argument('--use_parallel', action='store_true', default=False, help='') + parser.add_argument('--wo_crop_gt_with_tail', action='store_true', default=False, help='') + + args = parser.parse_args() + + ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve() + + if args.func == 'create_waymo_infos': + try: + yaml_config = yaml.safe_load(open(args.cfg_file), Loader=yaml.FullLoader) + except: + yaml_config = yaml.safe_load(open(args.cfg_file)) + dataset_cfg = EasyDict(yaml_config) + dataset_cfg.PROCESSED_DATA_TAG = args.processed_data_tag + create_waymo_infos( + dataset_cfg=dataset_cfg, + class_names=['Vehicle', 'Pedestrian', 'Cyclist'], + data_path=ROOT_DIR / 'data' / 'waymo', + save_path=ROOT_DIR / 'data' / 'waymo', + raw_data_tag='raw_data', + processed_data_tag=args.processed_data_tag, + update_info_only=args.update_info_only + ) + elif args.func == 'create_waymo_gt_database': + try: + yaml_config = yaml.safe_load(open(args.cfg_file), Loader=yaml.FullLoader) + except: + yaml_config = yaml.safe_load(open(args.cfg_file)) + dataset_cfg = EasyDict(yaml_config) + dataset_cfg.PROCESSED_DATA_TAG = args.processed_data_tag + create_waymo_gt_database( + dataset_cfg=dataset_cfg, + class_names=['Vehicle', 'Pedestrian', 'Cyclist'], + data_path=ROOT_DIR / 'data' / 'waymo', + save_path=ROOT_DIR / 'data' / 'waymo', + processed_data_tag=args.processed_data_tag, + use_parallel=args.use_parallel, + crop_gt_with_tail=not args.wo_crop_gt_with_tail + ) + else: + raise NotImplementedError diff --git a/toolbox/openpcdet/pcdet/datasets/waymo/waymo_eval.py b/toolbox/openpcdet/pcdet/datasets/waymo/waymo_eval.py new file mode 100644 index 000000000..0cbf61af1 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/waymo/waymo_eval.py @@ -0,0 +1,251 @@ +# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset +# Reference https://github.com/open-mmlab/OpenPCDet +# Written by Shaoshuai Shi, Chaoxu Guo +# All Rights Reserved 2019-2020. 
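+# Example usage (paths are illustrative; the flags are defined in main() below):
+#   python waymo_eval.py --pred_infos result.pkl \
+#       --gt_infos data/waymo/waymo_processed_data_v0_5_0_infos_val.pkl \
+#       --class_names Vehicle Pedestrian Cyclist --sampled_interval 5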
+ + +import numpy as np +import pickle +import tensorflow as tf +from google.protobuf import text_format +from waymo_open_dataset.metrics.python import detection_metrics +from waymo_open_dataset.protos import metrics_pb2 +import argparse + + +tf.get_logger().setLevel('INFO') + + +def limit_period(val, offset=0.5, period=np.pi): + return val - np.floor(val / period + offset) * period + + +class OpenPCDetWaymoDetectionMetricsEstimator(tf.test.TestCase): + WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist'] + + def generate_waymo_type_results(self, infos, class_names, is_gt=False, fake_gt_infos=True): + def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar): + """ + Args: + boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center + + Returns: + boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + """ + w, l, h, r = boxes3d_lidar[:, 3:4], boxes3d_lidar[:, 4:5], boxes3d_lidar[:, 5:6], boxes3d_lidar[:, 6:7] + boxes3d_lidar[:, 2] += h[:, 0] / 2 + return np.concatenate([boxes3d_lidar[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1) + + frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty = [], [], [], [], [], [] + for frame_index, info in enumerate(infos): + if is_gt: + box_mask = np.array([n in class_names for n in info['name']], dtype=np.bool_) + if 'num_points_in_gt' in info: + zero_difficulty_mask = info['difficulty'] == 0 + info['difficulty'][(info['num_points_in_gt'] > 5) & zero_difficulty_mask] = 1 + info['difficulty'][(info['num_points_in_gt'] <= 5) & zero_difficulty_mask] = 2 + nonzero_mask = info['num_points_in_gt'] > 0 + box_mask = box_mask & nonzero_mask + else: + print('Please provide the num_points_in_gt for evaluating on Waymo Dataset ' + '(If you create Waymo Infos before 20201126, please re-create the validation infos ' + 'with version 1.2 Waymo dataset to get this attribute). 
SSS of OpenPCDet') + raise NotImplementedError + + num_boxes = box_mask.sum() + box_name = info['name'][box_mask] + + difficulty.append(info['difficulty'][box_mask]) + score.append(np.ones(num_boxes)) + if fake_gt_infos: + info['gt_boxes_lidar'] = boxes3d_kitti_fakelidar_to_lidar(info['gt_boxes_lidar']) + + if info['gt_boxes_lidar'].shape[-1] == 9: + boxes3d.append(info['gt_boxes_lidar'][box_mask][:, 0:7]) + else: + boxes3d.append(info['gt_boxes_lidar'][box_mask]) + else: + num_boxes = len(info['boxes_lidar']) + difficulty.append([0] * num_boxes) + score.append(info['score']) + boxes3d.append(np.array(info['boxes_lidar'][:, :7])) + box_name = info['name'] + if boxes3d[-1].shape[-1] == 9: + boxes3d[-1] = boxes3d[-1][:, 0:7] + + obj_type += [self.WAYMO_CLASSES.index(name) for i, name in enumerate(box_name)] + frame_id.append(np.array([frame_index] * num_boxes)) + overlap_nlz.append(np.zeros(num_boxes)) # set zero currently + + frame_id = np.concatenate(frame_id).reshape(-1).astype(np.int64) + boxes3d = np.concatenate(boxes3d, axis=0) + obj_type = np.array(obj_type).reshape(-1) + score = np.concatenate(score).reshape(-1) + overlap_nlz = np.concatenate(overlap_nlz).reshape(-1) + difficulty = np.concatenate(difficulty).reshape(-1).astype(np.int8) + + boxes3d[:, -1] = limit_period(boxes3d[:, -1], offset=0.5, period=np.pi * 2) + + return frame_id, boxes3d, obj_type, score, overlap_nlz, difficulty + + def build_config(self): + config = metrics_pb2.Config() + config_text = """ + breakdown_generator_ids: OBJECT_TYPE + difficulties { + levels:1 + levels:2 + } + matcher_type: TYPE_HUNGARIAN + iou_thresholds: 0.0 + iou_thresholds: 0.7 + iou_thresholds: 0.5 + iou_thresholds: 0.5 + iou_thresholds: 0.5 + box_type: TYPE_3D + """ + + for x in range(0, 100): + config.score_cutoffs.append(x * 0.01) + config.score_cutoffs.append(1.0) + + text_format.Merge(config_text, config) + return config + + def build_graph(self, graph): + with graph.as_default(): + self._pd_frame_id = tf.compat.v1.placeholder(dtype=tf.int64) + self._pd_bbox = tf.compat.v1.placeholder(dtype=tf.float32) + self._pd_type = tf.compat.v1.placeholder(dtype=tf.uint8) + self._pd_score = tf.compat.v1.placeholder(dtype=tf.float32) + self._pd_overlap_nlz = tf.compat.v1.placeholder(dtype=tf.bool) + + self._gt_frame_id = tf.compat.v1.placeholder(dtype=tf.int64) + self._gt_bbox = tf.compat.v1.placeholder(dtype=tf.float32) + self._gt_type = tf.compat.v1.placeholder(dtype=tf.uint8) + self._gt_difficulty = tf.compat.v1.placeholder(dtype=tf.uint8) + metrics = detection_metrics.get_detection_metric_ops( + config=self.build_config(), + prediction_frame_id=self._pd_frame_id, + prediction_bbox=self._pd_bbox, + prediction_type=self._pd_type, + prediction_score=self._pd_score, + prediction_overlap_nlz=self._pd_overlap_nlz, + ground_truth_bbox=self._gt_bbox, + ground_truth_type=self._gt_type, + ground_truth_frame_id=self._gt_frame_id, + ground_truth_difficulty=self._gt_difficulty, + ) + return metrics + + def run_eval_ops( + self, + sess, + graph, + metrics, + prediction_frame_id, + prediction_bbox, + prediction_type, + prediction_score, + prediction_overlap_nlz, + ground_truth_frame_id, + ground_truth_bbox, + ground_truth_type, + ground_truth_difficulty, + ): + sess.run( + [tf.group([value[1] for value in metrics.values()])], + feed_dict={ + self._pd_bbox: prediction_bbox, + self._pd_frame_id: prediction_frame_id, + self._pd_type: prediction_type, + self._pd_score: prediction_score, + self._pd_overlap_nlz: prediction_overlap_nlz, + self._gt_bbox: 
ground_truth_bbox, + self._gt_type: ground_truth_type, + self._gt_frame_id: ground_truth_frame_id, + self._gt_difficulty: ground_truth_difficulty, + }, + ) + + def eval_value_ops(self, sess, graph, metrics): + return {item[0]: sess.run([item[1][0]]) for item in metrics.items()} + + def mask_by_distance(self, distance_thresh, boxes_3d, *args): + mask = np.linalg.norm(boxes_3d[:, 0:2], axis=1) < distance_thresh + 0.5 + boxes_3d = boxes_3d[mask] + ret_ans = [boxes_3d] + for arg in args: + ret_ans.append(arg[mask]) + + return tuple(ret_ans) + + def waymo_evaluation(self, prediction_infos, gt_infos, class_name, distance_thresh=100, fake_gt_infos=True): + print('Start the waymo evaluation...') + assert len(prediction_infos) == len(gt_infos), '%d vs %d' % (prediction_infos.__len__(), gt_infos.__len__()) + + tf.compat.v1.disable_eager_execution() + pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, _ = self.generate_waymo_type_results( + prediction_infos, class_name, is_gt=False + ) + gt_frameid, gt_boxes3d, gt_type, gt_score, gt_overlap_nlz, gt_difficulty = self.generate_waymo_type_results( + gt_infos, class_name, is_gt=True, fake_gt_infos=fake_gt_infos + ) + + pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz = self.mask_by_distance( + distance_thresh, pd_boxes3d, pd_frameid, pd_type, pd_score, pd_overlap_nlz + ) + gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty = self.mask_by_distance( + distance_thresh, gt_boxes3d, gt_frameid, gt_type, gt_score, gt_difficulty + ) + + print('Number: (pd, %d) VS. (gt, %d)' % (len(pd_boxes3d), len(gt_boxes3d))) + print('Level 1: %d, Level2: %d)' % ((gt_difficulty == 1).sum(), (gt_difficulty == 2).sum())) + + if pd_score.max() > 1: + # assert pd_score.max() <= 1.0, 'Waymo evaluation only supports normalized scores' + pd_score = 1 / (1 + np.exp(-pd_score)) + print('Warning: Waymo evaluation only supports normalized scores') + + graph = tf.Graph() + metrics = self.build_graph(graph) + with self.test_session(graph=graph) as sess: + sess.run(tf.compat.v1.initializers.local_variables()) + self.run_eval_ops( + sess, graph, metrics, pd_frameid, pd_boxes3d, pd_type, pd_score, pd_overlap_nlz, + gt_frameid, gt_boxes3d, gt_type, gt_difficulty, + ) + with tf.compat.v1.variable_scope('detection_metrics', reuse=True): + aps = self.eval_value_ops(sess, graph, metrics) + return aps + + +def main(): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--pred_infos', type=str, default=None, help='pickle file') + parser.add_argument('--gt_infos', type=str, default=None, help='pickle file') + parser.add_argument('--class_names', type=str, nargs='+', default=['Vehicle', 'Pedestrian', 'Cyclist'], help='') + parser.add_argument('--sampled_interval', type=int, default=5, help='sampled interval for GT sequences') + args = parser.parse_args() + + pred_infos = pickle.load(open(args.pred_infos, 'rb')) + gt_infos = pickle.load(open(args.gt_infos, 'rb')) + + print('Start to evaluate the waymo format results...') + eval = OpenPCDetWaymoDetectionMetricsEstimator() + + gt_infos_dst = [] + for idx in range(0, len(gt_infos), args.sampled_interval): + cur_info = gt_infos[idx]['annos'] + cur_info['frame_id'] = gt_infos[idx]['frame_id'] + gt_infos_dst.append(cur_info) + + waymo_AP = eval.waymo_evaluation( + pred_infos, gt_infos_dst, class_name=args.class_names, distance_thresh=1000, fake_gt_infos=False + ) + + print(waymo_AP) + + +if __name__ == '__main__': + main() diff --git a/toolbox/openpcdet/pcdet/datasets/waymo/waymo_utils.py 
b/toolbox/openpcdet/pcdet/datasets/waymo/waymo_utils.py new file mode 100644 index 000000000..9bdcfe7b2 --- /dev/null +++ b/toolbox/openpcdet/pcdet/datasets/waymo/waymo_utils.py @@ -0,0 +1,268 @@ +# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset +# Reference https://github.com/open-mmlab/OpenPCDet +# Written by Shaoshuai Shi, Chaoxu Guo +# All Rights Reserved 2019-2020. + + +import os +import pickle +import numpy as np +from ...utils import common_utils +import tensorflow as tf +from waymo_open_dataset.utils import frame_utils, transform_utils, range_image_utils +from waymo_open_dataset import dataset_pb2 + +try: + tf.enable_eager_execution() +except: + pass + +WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist'] + + +def generate_labels(frame, pose): + obj_name, difficulty, dimensions, locations, heading_angles = [], [], [], [], [] + tracking_difficulty, speeds, accelerations, obj_ids = [], [], [], [] + num_points_in_gt = [] + laser_labels = frame.laser_labels + + for i in range(len(laser_labels)): + box = laser_labels[i].box + class_ind = laser_labels[i].type + loc = [box.center_x, box.center_y, box.center_z] + heading_angles.append(box.heading) + obj_name.append(WAYMO_CLASSES[class_ind]) + difficulty.append(laser_labels[i].detection_difficulty_level) + tracking_difficulty.append(laser_labels[i].tracking_difficulty_level) + dimensions.append([box.length, box.width, box.height]) # lwh in unified coordinate of OpenPCDet + locations.append(loc) + obj_ids.append(laser_labels[i].id) + num_points_in_gt.append(laser_labels[i].num_lidar_points_in_box) + speeds.append([laser_labels[i].metadata.speed_x, laser_labels[i].metadata.speed_y]) + accelerations.append([laser_labels[i].metadata.accel_x, laser_labels[i].metadata.accel_y]) + + annotations = {} + annotations['name'] = np.array(obj_name) + annotations['difficulty'] = np.array(difficulty) + annotations['dimensions'] = np.array(dimensions) + annotations['location'] = np.array(locations) + annotations['heading_angles'] = np.array(heading_angles) + + annotations['obj_ids'] = np.array(obj_ids) + annotations['tracking_difficulty'] = np.array(tracking_difficulty) + annotations['num_points_in_gt'] = np.array(num_points_in_gt) + annotations['speed_global'] = np.array(speeds) + annotations['accel_global'] = np.array(accelerations) + + annotations = common_utils.drop_info_with_name(annotations, name='unknown') + if annotations['name'].__len__() > 0: + global_speed = np.pad(annotations['speed_global'], ((0, 0), (0, 1)), mode='constant', constant_values=0) # (N, 3) + speed = np.dot(global_speed, np.linalg.inv(pose[:3, :3].T)) + speed = speed[:, :2] + + gt_boxes_lidar = np.concatenate([ + annotations['location'], annotations['dimensions'], annotations['heading_angles'][..., np.newaxis], speed], + axis=1 + ) + else: + gt_boxes_lidar = np.zeros((0, 9)) + annotations['gt_boxes_lidar'] = gt_boxes_lidar + return annotations + + +def convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, 1)): + """ + Modified from the codes of Waymo Open Dataset. + Convert range images to point cloud. + Args: + frame: open dataset frame + range_images: A dict of {laser_name, [range_image_first_return, range_image_second_return]}. + camera_projections: A dict of {laser_name, + [camera_projection_from_first_return, camera_projection_from_second_return]}. + range_image_top_pose: range image pixel pose for top lidar. + ri_index: 0 for the first return, 1 for the second return. 
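+            A tuple such as the default (0, 1) merges points from both returns.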
+ + Returns: + points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars). + cp_points: {[N, 6]} list of camera projections of length 5 (number of lidars). + """ + calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name) + points = [] + cp_points = [] + points_NLZ = [] + points_intensity = [] + points_elongation = [] + + frame_pose = tf.convert_to_tensor(np.reshape(np.array(frame.pose.transform), [4, 4])) + # [H, W, 6] + range_image_top_pose_tensor = tf.reshape( + tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims + ) + # [H, W, 3, 3] + range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix( + range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1], + range_image_top_pose_tensor[..., 2]) + range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:] + range_image_top_pose_tensor = transform_utils.get_transform( + range_image_top_pose_tensor_rotation, + range_image_top_pose_tensor_translation) + + for c in calibrations: + points_single, cp_points_single, points_NLZ_single, points_intensity_single, points_elongation_single \ + = [], [], [], [], [] + for cur_ri_index in ri_index: + range_image = range_images[c.name][cur_ri_index] + if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test + beam_inclinations = range_image_utils.compute_inclination( + tf.constant([c.beam_inclination_min, c.beam_inclination_max]), + height=range_image.shape.dims[0]) + else: + beam_inclinations = tf.constant(c.beam_inclinations) + + beam_inclinations = tf.reverse(beam_inclinations, axis=[-1]) + extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4]) + + range_image_tensor = tf.reshape( + tf.convert_to_tensor(range_image.data), range_image.shape.dims) + pixel_pose_local = None + frame_pose_local = None + if c.name == dataset_pb2.LaserName.TOP: + pixel_pose_local = range_image_top_pose_tensor + pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0) + frame_pose_local = tf.expand_dims(frame_pose, axis=0) + range_image_mask = range_image_tensor[..., 0] > 0 + range_image_NLZ = range_image_tensor[..., 3] + range_image_intensity = range_image_tensor[..., 1] + range_image_elongation = range_image_tensor[..., 2] + range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image( + tf.expand_dims(range_image_tensor[..., 0], axis=0), + tf.expand_dims(extrinsic, axis=0), + tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0), + pixel_pose=pixel_pose_local, + frame_pose=frame_pose_local) + + range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0) + points_tensor = tf.gather_nd(range_image_cartesian, + tf.where(range_image_mask)) + points_NLZ_tensor = tf.gather_nd(range_image_NLZ, tf.compat.v1.where(range_image_mask)) + points_intensity_tensor = tf.gather_nd(range_image_intensity, tf.compat.v1.where(range_image_mask)) + points_elongation_tensor = tf.gather_nd(range_image_elongation, tf.compat.v1.where(range_image_mask)) + cp = camera_projections[c.name][0] + cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims) + cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask)) + + points_single.append(points_tensor.numpy()) + cp_points_single.append(cp_points_tensor.numpy()) + points_NLZ_single.append(points_NLZ_tensor.numpy()) + points_intensity_single.append(points_intensity_tensor.numpy()) + points_elongation_single.append(points_elongation_tensor.numpy()) + + points.append(np.concatenate(points_single, axis=0)) + 
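+        # the same per-lidar concatenation follows for camera projections,
+        # NLZ flags, intensity and elongation: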
cp_points.append(np.concatenate(cp_points_single, axis=0)) + points_NLZ.append(np.concatenate(points_NLZ_single, axis=0)) + points_intensity.append(np.concatenate(points_intensity_single, axis=0)) + points_elongation.append(np.concatenate(points_elongation_single, axis=0)) + + return points, cp_points, points_NLZ, points_intensity, points_elongation + + +def save_lidar_points(frame, cur_save_path, use_two_returns=True): + ret_outputs = frame_utils.parse_range_image_and_camera_projection(frame) + if len(ret_outputs) == 4: + range_images, camera_projections, seg_labels, range_image_top_pose = ret_outputs + else: + assert len(ret_outputs) == 3 + range_images, camera_projections, range_image_top_pose = ret_outputs + + points, cp_points, points_in_NLZ_flag, points_intensity, points_elongation = convert_range_image_to_point_cloud( + frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, 1) if use_two_returns else (0,) + ) + + # 3d points in vehicle frame. + points_all = np.concatenate(points, axis=0) + points_in_NLZ_flag = np.concatenate(points_in_NLZ_flag, axis=0).reshape(-1, 1) + points_intensity = np.concatenate(points_intensity, axis=0).reshape(-1, 1) + points_elongation = np.concatenate(points_elongation, axis=0).reshape(-1, 1) + + num_points_of_each_lidar = [point.shape[0] for point in points] + save_points = np.concatenate([ + points_all, points_intensity, points_elongation, points_in_NLZ_flag + ], axis=-1).astype(np.float32) + + np.save(cur_save_path, save_points) + # print('saving to ', cur_save_path) + return num_points_of_each_lidar + + +def process_single_sequence(sequence_file, save_path, sampled_interval, has_label=True, use_two_returns=True, update_info_only=False): + sequence_name = os.path.splitext(os.path.basename(sequence_file))[0] + + # print('Load record (sampled_interval=%d): %s' % (sampled_interval, sequence_name)) + if not sequence_file.exists(): + print('NotFoundError: %s' % sequence_file) + return [] + + dataset = tf.data.TFRecordDataset(str(sequence_file), compression_type='') + cur_save_dir = save_path / sequence_name + cur_save_dir.mkdir(parents=True, exist_ok=True) + pkl_file = cur_save_dir / ('%s.pkl' % sequence_name) + + sequence_infos = [] + if pkl_file.exists(): + sequence_infos = pickle.load(open(pkl_file, 'rb')) + sequence_infos_old = None + if not update_info_only: + print('Skip sequence since it has been processed before: %s' % pkl_file) + return sequence_infos + else: + sequence_infos_old = sequence_infos + sequence_infos = [] + + for cnt, data in enumerate(dataset): + if cnt % sampled_interval != 0: + continue + # print(sequence_name, cnt) + frame = dataset_pb2.Frame() + frame.ParseFromString(bytearray(data.numpy())) + + info = {} + pc_info = {'num_features': 5, 'lidar_sequence': sequence_name, 'sample_idx': cnt} + info['point_cloud'] = pc_info + + info['frame_id'] = sequence_name + ('_%03d' % cnt) + info['metadata'] = { + 'context_name': frame.context.name, + 'timestamp_micros': frame.timestamp_micros + } + image_info = {} + for j in range(5): + width = frame.context.camera_calibrations[j].width + height = frame.context.camera_calibrations[j].height + image_info.update({'image_shape_%d' % j: (height, width)}) + info['image'] = image_info + + pose = np.array(frame.pose.transform, dtype=np.float32).reshape(4, 4) + info['pose'] = pose + + if has_label: + annotations = generate_labels(frame, pose=pose) + info['annos'] = annotations + + if update_info_only and sequence_infos_old is not None: + assert info['frame_id'] == 
sequence_infos_old[cnt]['frame_id'] + num_points_of_each_lidar = sequence_infos_old[cnt]['num_points_of_each_lidar'] + else: + num_points_of_each_lidar = save_lidar_points( + frame, cur_save_dir / ('%04d.npy' % cnt), use_two_returns=use_two_returns + ) + info['num_points_of_each_lidar'] = num_points_of_each_lidar + + sequence_infos.append(info) + + with open(pkl_file, 'wb') as f: + pickle.dump(sequence_infos, f) + + print('Infos are saved to (sampled_interval=%d): %s' % (sampled_interval, pkl_file)) + return sequence_infos + + diff --git a/toolbox/openpcdet/pcdet/models/__init__.py b/toolbox/openpcdet/pcdet/models/__init__.py new file mode 100644 index 000000000..7049bb4a0 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/__init__.py @@ -0,0 +1,54 @@ +from collections import namedtuple + +import numpy as np +import torch + +from .detectors import build_detector + +try: + import kornia +except: + pass + # print('Warning: kornia is not installed. This package is only required by CaDDN') + + + +def build_network(model_cfg, num_class, dataset): + model = build_detector( + model_cfg=model_cfg, num_class=num_class, dataset=dataset + ) + return model + + +def load_data_to_gpu(batch_dict): + for key, val in batch_dict.items(): + if key == 'camera_imgs': + batch_dict[key] = val.cuda() + elif not isinstance(val, np.ndarray): + continue + elif key in ['frame_id', 'metadata', 'calib', 'image_paths','ori_shape','img_process_infos']: + continue + elif key in ['images']: + batch_dict[key] = kornia.image_to_tensor(val).float().cuda().contiguous() + elif key in ['image_shape']: + batch_dict[key] = torch.from_numpy(val).int().cuda() + else: + batch_dict[key] = torch.from_numpy(val).float().cuda() + + +def model_fn_decorator(): + ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict']) + + def model_func(model, batch_dict): + load_data_to_gpu(batch_dict) + ret_dict, tb_dict, disp_dict = model(batch_dict) + + loss = ret_dict['loss'].mean() + if hasattr(model, 'update_global_step'): + model.update_global_step() + else: + model.module.update_global_step() + + return ModelReturn(loss, tb_dict, disp_dict) + + return model_func diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_2d/__init__.py new file mode 100644 index 000000000..fea8d1653 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/__init__.py @@ -0,0 +1,7 @@ +from .base_bev_backbone import BaseBEVBackbone, BaseBEVBackboneV1, BaseBEVResBackbone + +__all__ = { + 'BaseBEVBackbone': BaseBEVBackbone, + 'BaseBEVBackboneV1': BaseBEVBackboneV1, + 'BaseBEVResBackbone': BaseBEVResBackbone, +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/base_bev_backbone.py b/toolbox/openpcdet/pcdet/models/backbones_2d/base_bev_backbone.py new file mode 100644 index 000000000..4dc7dbb77 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/base_bev_backbone.py @@ -0,0 +1,351 @@ +import numpy as np +import torch +import torch.nn as nn + + +class BaseBEVBackbone(nn.Module): + def __init__(self, model_cfg, input_channels): + super().__init__() + self.model_cfg = model_cfg + + if self.model_cfg.get('LAYER_NUMS', None) is not None: + assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS) + layer_nums = self.model_cfg.LAYER_NUMS + layer_strides = self.model_cfg.LAYER_STRIDES + num_filters = self.model_cfg.NUM_FILTERS + else: + layer_nums = layer_strides = num_filters = [] + + if self.model_cfg.get('UPSAMPLE_STRIDES', 
None) is not None: + assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS) + num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS + upsample_strides = self.model_cfg.UPSAMPLE_STRIDES + else: + upsample_strides = num_upsample_filters = [] + + num_levels = len(layer_nums) + c_in_list = [input_channels, *num_filters[:-1]] + self.blocks = nn.ModuleList() + self.deblocks = nn.ModuleList() + for idx in range(num_levels): + cur_layers = [ + nn.ZeroPad2d(1), + nn.Conv2d( + c_in_list[idx], num_filters[idx], kernel_size=3, + stride=layer_strides[idx], padding=0, bias=False + ), + nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ] + for k in range(layer_nums[idx]): + cur_layers.extend([ + nn.Conv2d(num_filters[idx], num_filters[idx], kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ]) + self.blocks.append(nn.Sequential(*cur_layers)) + if len(upsample_strides) > 0: + stride = upsample_strides[idx] + if stride > 1 or (stride == 1 and not self.model_cfg.get('USE_CONV_FOR_NO_STRIDE', False)): + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d( + num_filters[idx], num_upsample_filters[idx], + upsample_strides[idx], + stride=upsample_strides[idx], bias=False + ), + nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + )) + else: + stride = np.round(1 / stride).astype(np.int) + self.deblocks.append(nn.Sequential( + nn.Conv2d( + num_filters[idx], num_upsample_filters[idx], + stride, + stride=stride, bias=False + ), + nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + )) + + c_in = sum(num_upsample_filters) + if len(upsample_strides) > num_levels: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False), + nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01), + nn.ReLU(), + )) + + self.num_bev_features = c_in + + def forward(self, data_dict): + """ + Args: + data_dict: + spatial_features + Returns: + """ + spatial_features = data_dict['spatial_features'] + ups = [] + ret_dict = {} + x = spatial_features + for i in range(len(self.blocks)): + x = self.blocks[i](x) + + stride = int(spatial_features.shape[2] / x.shape[2]) + ret_dict['spatial_features_%dx' % stride] = x + if len(self.deblocks) > 0: + ups.append(self.deblocks[i](x)) + else: + ups.append(x) + + if len(ups) > 1: + x = torch.cat(ups, dim=1) + elif len(ups) == 1: + x = ups[0] + + if len(self.deblocks) > len(self.blocks): + x = self.deblocks[-1](x) + + data_dict['spatial_features_2d'] = x + + return data_dict + + +class BaseBEVBackboneV1(nn.Module): + def __init__(self, model_cfg, **kwargs): + super().__init__() + self.model_cfg = model_cfg + + layer_nums = self.model_cfg.LAYER_NUMS + num_filters = self.model_cfg.NUM_FILTERS + assert len(layer_nums) == len(num_filters) == 2 + + num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS + upsample_strides = self.model_cfg.UPSAMPLE_STRIDES + assert len(num_upsample_filters) == len(upsample_strides) + + num_levels = len(layer_nums) + self.blocks = nn.ModuleList() + self.deblocks = nn.ModuleList() + for idx in range(num_levels): + cur_layers = [ + nn.ZeroPad2d(1), + nn.Conv2d( + num_filters[idx], num_filters[idx], kernel_size=3, + stride=1, padding=0, bias=False + ), + nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ] + for k in range(layer_nums[idx]): + cur_layers.extend([ + nn.Conv2d(num_filters[idx], num_filters[idx], 
kernel_size=3, padding=1, bias=False), + nn.BatchNorm2d(num_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + ]) + self.blocks.append(nn.Sequential(*cur_layers)) + if len(upsample_strides) > 0: + stride = upsample_strides[idx] + if stride >= 1: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d( + num_filters[idx], num_upsample_filters[idx], + upsample_strides[idx], + stride=upsample_strides[idx], bias=False + ), + nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + )) + else: + stride = np.round(1 / stride).astype(np.int) + self.deblocks.append(nn.Sequential( + nn.Conv2d( + num_filters[idx], num_upsample_filters[idx], + stride, + stride=stride, bias=False + ), + nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + )) + + c_in = sum(num_upsample_filters) + if len(upsample_strides) > num_levels: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False), + nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01), + nn.ReLU(), + )) + + self.num_bev_features = c_in + + def forward(self, data_dict): + """ + Args: + data_dict: + spatial_features + Returns: + """ + spatial_features = data_dict['multi_scale_2d_features'] + + x_conv4 = spatial_features['x_conv4'] + x_conv5 = spatial_features['x_conv5'] + + ups = [self.deblocks[0](x_conv4)] + + x = self.blocks[1](x_conv5) + ups.append(self.deblocks[1](x)) + + x = torch.cat(ups, dim=1) + x = self.blocks[0](x) + + data_dict['spatial_features_2d'] = x + + return data_dict + + +class BasicBlock(nn.Module): + expansion: int = 1 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + padding: int = 1, + downsample: bool = False, + ) -> None: + super().__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=padding, bias=False) + self.bn1 = nn.BatchNorm2d(planes, eps=1e-3, momentum=0.01) + self.relu1 = nn.ReLU() + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes, eps=1e-3, momentum=0.01) + self.relu2 = nn.ReLU() + self.downsample = downsample + if self.downsample: + self.downsample_layer = nn.Sequential( + nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, padding=0, bias=False), + nn.BatchNorm2d(planes, eps=1e-3, momentum=0.01) + ) + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample: + identity = self.downsample_layer(x) + + out += identity + out = self.relu2(out) + + return out + + +class BaseBEVResBackbone(nn.Module): + def __init__(self, model_cfg, input_channels): + super().__init__() + self.model_cfg = model_cfg + + if self.model_cfg.get('LAYER_NUMS', None) is not None: + assert len(self.model_cfg.LAYER_NUMS) == len(self.model_cfg.LAYER_STRIDES) == len(self.model_cfg.NUM_FILTERS) + layer_nums = self.model_cfg.LAYER_NUMS + layer_strides = self.model_cfg.LAYER_STRIDES + num_filters = self.model_cfg.NUM_FILTERS + else: + layer_nums = layer_strides = num_filters = [] + + if self.model_cfg.get('UPSAMPLE_STRIDES', None) is not None: + assert len(self.model_cfg.UPSAMPLE_STRIDES) == len(self.model_cfg.NUM_UPSAMPLE_FILTERS) + num_upsample_filters = self.model_cfg.NUM_UPSAMPLE_FILTERS + upsample_strides = self.model_cfg.UPSAMPLE_STRIDES + else: + upsample_strides = num_upsample_filters = [] + + num_levels = len(layer_nums) + c_in_list = 
[input_channels, *num_filters[:-1]] + self.blocks = nn.ModuleList() + self.deblocks = nn.ModuleList() + for idx in range(num_levels): + cur_layers = [ + # nn.ZeroPad2d(1), + BasicBlock(c_in_list[idx], num_filters[idx], layer_strides[idx], 1, True) + ] + for k in range(layer_nums[idx]): + cur_layers.extend([ + BasicBlock(num_filters[idx], num_filters[idx]) + ]) + self.blocks.append(nn.Sequential(*cur_layers)) + if len(upsample_strides) > 0: + stride = upsample_strides[idx] + if stride >= 1: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d( + num_filters[idx], num_upsample_filters[idx], + upsample_strides[idx], + stride=upsample_strides[idx], bias=False + ), + nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + )) + else: + stride = np.round(1 / stride).astype(np.int) + self.deblocks.append(nn.Sequential( + nn.Conv2d( + num_filters[idx], num_upsample_filters[idx], + stride, + stride=stride, bias=False + ), + nn.BatchNorm2d(num_upsample_filters[idx], eps=1e-3, momentum=0.01), + nn.ReLU() + )) + + c_in = sum(num_upsample_filters) if len(num_upsample_filters) > 0 else sum(num_filters) + if len(upsample_strides) > num_levels: + self.deblocks.append(nn.Sequential( + nn.ConvTranspose2d(c_in, c_in, upsample_strides[-1], stride=upsample_strides[-1], bias=False), + nn.BatchNorm2d(c_in, eps=1e-3, momentum=0.01), + nn.ReLU(), + )) + + self.num_bev_features = c_in + + def forward(self, data_dict): + """ + Args: + data_dict: + spatial_features + Returns: + """ + spatial_features = data_dict['spatial_features'] + ups = [] + ret_dict = {} + x = spatial_features + for i in range(len(self.blocks)): + x = self.blocks[i](x) + + stride = int(spatial_features.shape[2] / x.shape[2]) + ret_dict['spatial_features_%dx' % stride] = x + if len(self.deblocks) > 0: + ups.append(self.deblocks[i](x)) + else: + ups.append(x) + + if len(ups) > 1: + x = torch.cat(ups, dim=1) + elif len(ups) == 1: + x = ups[0] + + if len(self.deblocks) > len(self.blocks): + x = self.deblocks[-1](x) + + data_dict['spatial_features_2d'] = x + + return data_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/fuser/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_2d/fuser/__init__.py new file mode 100644 index 000000000..b65b9738c --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/fuser/__init__.py @@ -0,0 +1,4 @@ +from .convfuser import ConvFuser +__all__ = { + 'ConvFuser':ConvFuser +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/fuser/convfuser.py b/toolbox/openpcdet/pcdet/models/backbones_2d/fuser/convfuser.py new file mode 100644 index 000000000..9107e75a7 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/fuser/convfuser.py @@ -0,0 +1,33 @@ +import torch +from torch import nn + + +class ConvFuser(nn.Module): + def __init__(self,model_cfg) -> None: + super().__init__() + self.model_cfg = model_cfg + in_channel = self.model_cfg.IN_CHANNEL + out_channel = self.model_cfg.OUT_CHANNEL + self.conv = nn.Sequential( + nn.Conv2d(in_channel, out_channel, 3, padding=1, bias=False), + nn.BatchNorm2d(out_channel), + nn.ReLU(True) + ) + + def forward(self,batch_dict): + """ + Args: + batch_dict: + spatial_features_img (tensor): Bev features from image modality + spatial_features (tensor): Bev features from lidar modality + + Returns: + batch_dict: + spatial_features (tensor): Bev features after muli-modal fusion + """ + img_bev = batch_dict['spatial_features_img'] + lidar_bev = batch_dict['spatial_features'] + cat_bev = 
torch.cat([img_bev,lidar_bev],dim=1) + mm_bev = self.conv(cat_bev) + batch_dict['spatial_features'] = mm_bev + return batch_dict \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/__init__.py new file mode 100644 index 000000000..e7724d1ac --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/__init__.py @@ -0,0 +1,10 @@ +from .height_compression import HeightCompression +from .pointpillar_scatter import PointPillarScatter, PointPillarScatter3d +from .conv2d_collapse import Conv2DCollapse + +__all__ = { + 'HeightCompression': HeightCompression, + 'PointPillarScatter': PointPillarScatter, + 'Conv2DCollapse': Conv2DCollapse, + 'PointPillarScatter3d': PointPillarScatter3d, +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py new file mode 100644 index 000000000..e7d73baf2 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/conv2d_collapse.py @@ -0,0 +1,38 @@ +import torch +import torch.nn as nn + +from pcdet.models.model_utils.basic_block_2d import BasicBlock2D + + +class Conv2DCollapse(nn.Module): + + def __init__(self, model_cfg, grid_size): + """ + Initializes 2D convolution collapse module + Args: + model_cfg: EasyDict, Model configuration + grid_size: (X, Y, Z) Voxel grid size + """ + super().__init__() + self.model_cfg = model_cfg + self.num_heights = grid_size[-1] + self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES + self.block = BasicBlock2D(in_channels=self.num_bev_features * self.num_heights, + out_channels=self.num_bev_features, + **self.model_cfg.ARGS) + + def forward(self, batch_dict): + """ + Collapses voxel features to BEV via concatenation and channel reduction + Args: + batch_dict: + voxel_features: (B, C, Z, Y, X), Voxel feature representation + Returns: + batch_dict: + spatial_features: (B, C, Y, X), BEV feature representation + """ + voxel_features = batch_dict["voxel_features"] + bev_features = voxel_features.flatten(start_dim=1, end_dim=2) # (B, C, Z, Y, X) -> (B, C*Z, Y, X) + bev_features = self.block(bev_features) # (B, C*Z, Y, X) -> (B, C, Y, X) + batch_dict["spatial_features"] = bev_features + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/height_compression.py b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/height_compression.py new file mode 100644 index 000000000..98c8e573e --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/height_compression.py @@ -0,0 +1,26 @@ +import torch.nn as nn + + +class HeightCompression(nn.Module): + def __init__(self, model_cfg, **kwargs): + super().__init__() + self.model_cfg = model_cfg + self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES + + def forward(self, batch_dict): + """ + Args: + batch_dict: + encoded_spconv_tensor: sparse tensor + Returns: + batch_dict: + spatial_features: + + """ + encoded_spconv_tensor = batch_dict['encoded_spconv_tensor'] + spatial_features = encoded_spconv_tensor.dense() + N, C, D, H, W = spatial_features.shape + spatial_features = spatial_features.view(N, C * D, H, W) + batch_dict['spatial_features'] = spatial_features + batch_dict['spatial_features_stride'] = batch_dict['encoded_spconv_tensor_stride'] + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py 
b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py new file mode 100644 index 000000000..c57cda867 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_2d/map_to_bev/pointpillar_scatter.py @@ -0,0 +1,73 @@ +import torch +import torch.nn as nn + + +class PointPillarScatter(nn.Module): + def __init__(self, model_cfg, grid_size, **kwargs): + super().__init__() + + self.model_cfg = model_cfg + self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES + self.nx, self.ny, self.nz = grid_size + assert self.nz == 1 + + def forward(self, batch_dict, **kwargs): + pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords'] + batch_spatial_features = [] + batch_size = coords[:, 0].max().int().item() + 1 + for batch_idx in range(batch_size): + spatial_feature = torch.zeros( + self.num_bev_features, + self.nz * self.nx * self.ny, + dtype=pillar_features.dtype, + device=pillar_features.device) + + batch_mask = coords[:, 0] == batch_idx + this_coords = coords[batch_mask, :] + indices = this_coords[:, 1] + this_coords[:, 2] * self.nx + this_coords[:, 3] + indices = indices.type(torch.long) + pillars = pillar_features[batch_mask, :] + pillars = pillars.t() + spatial_feature[:, indices] = pillars + batch_spatial_features.append(spatial_feature) + + batch_spatial_features = torch.stack(batch_spatial_features, 0) + batch_spatial_features = batch_spatial_features.view(batch_size, self.num_bev_features * self.nz, self.ny, self.nx) + batch_dict['spatial_features'] = batch_spatial_features + return batch_dict + + +class PointPillarScatter3d(nn.Module): + def __init__(self, model_cfg, grid_size, **kwargs): + super().__init__() + + self.model_cfg = model_cfg + self.nx, self.ny, self.nz = self.model_cfg.INPUT_SHAPE + self.num_bev_features = self.model_cfg.NUM_BEV_FEATURES + self.num_bev_features_before_compression = self.model_cfg.NUM_BEV_FEATURES // self.nz + + def forward(self, batch_dict, **kwargs): + pillar_features, coords = batch_dict['pillar_features'], batch_dict['voxel_coords'] + + batch_spatial_features = [] + batch_size = coords[:, 0].max().int().item() + 1 + for batch_idx in range(batch_size): + spatial_feature = torch.zeros( + self.num_bev_features_before_compression, + self.nz * self.nx * self.ny, + dtype=pillar_features.dtype, + device=pillar_features.device) + + batch_mask = coords[:, 0] == batch_idx + this_coords = coords[batch_mask, :] + indices = this_coords[:, 1] * self.ny * self.nx + this_coords[:, 2] * self.nx + this_coords[:, 3] + indices = indices.type(torch.long) + pillars = pillar_features[batch_mask, :] + pillars = pillars.t() + spatial_feature[:, indices] = pillars + batch_spatial_features.append(spatial_feature) + + batch_spatial_features = torch.stack(batch_spatial_features, 0) + batch_spatial_features = batch_spatial_features.view(batch_size, self.num_bev_features_before_compression * self.nz, self.ny, self.nx) + batch_dict['spatial_features'] = batch_spatial_features + return batch_dict \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/__init__.py new file mode 100644 index 000000000..0a25c626a --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/__init__.py @@ -0,0 +1,22 @@ +from .pointnet2_backbone import PointNet2Backbone, PointNet2MSG +from .spconv_backbone import VoxelBackBone8x, VoxelResBackBone8x +from .spconv_backbone_2d import PillarBackBone8x, PillarRes18BackBone8x +from .spconv_backbone_focal import 
VoxelBackBone8xFocal
+from .spconv_backbone_voxelnext import VoxelResBackBone8xVoxelNeXt
+from .spconv_backbone_voxelnext2d import VoxelResBackBone8xVoxelNeXt2D
+from .spconv_unet import UNetV2
+from .dsvt import DSVT
+
+__all__ = {
+    'VoxelBackBone8x': VoxelBackBone8x,
+    'UNetV2': UNetV2,
+    'PointNet2Backbone': PointNet2Backbone,
+    'PointNet2MSG': PointNet2MSG,
+    'VoxelResBackBone8x': VoxelResBackBone8x,
+    'VoxelBackBone8xFocal': VoxelBackBone8xFocal,
+    'VoxelResBackBone8xVoxelNeXt': VoxelResBackBone8xVoxelNeXt,
+    'VoxelResBackBone8xVoxelNeXt2D': VoxelResBackBone8xVoxelNeXt2D,
+    'PillarBackBone8x': PillarBackBone8x,
+    'PillarRes18BackBone8x': PillarRes18BackBone8x,
+    'DSVT': DSVT,
+}
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/dsvt.py b/toolbox/openpcdet/pcdet/models/backbones_3d/dsvt.py
new file mode 100644
index 000000000..8c3e27936
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/dsvt.py
@@ -0,0 +1,616 @@
+import torch
+import torch.nn as nn
+from torch.utils.checkpoint import checkpoint
+from math import ceil
+
+from pcdet.models.model_utils.dsvt_utils import get_window_coors, get_inner_win_inds_cuda, get_pooling_index, get_continous_inds
+from pcdet.models.model_utils.dsvt_utils import PositionEmbeddingLearned
+
+
+class DSVT(nn.Module):
+    '''Dynamic Sparse Voxel Transformer Backbone.
+    Args:
+        INPUT_LAYER: Config of the input layer, which converts the VFE output into DSVT input.
+        block_name (list[string]): Name of the block for each stage. Length: stage_num.
+        set_info (list[list[int, int]]): A list of set configs, one per stage. Element i contains
+            [set_size, block_num], where set_size is the number of voxels in a set and block_num is the
+            number of blocks for stage i. Length: stage_num.
+        d_model (list[int]): Number of input channels for each stage. Length: stage_num.
+        nhead (list[int]): Number of attention heads for each stage. Length: stage_num.
+        dim_feedforward (list[int]): Dimension of the feedforward network in set attention for each stage.
+            Length: stage_num.
+        dropout (float): Dropout rate of set attention.
+        activation (string): Name of the activation layer in set attention.
+        reduction_type (string): Pooling method between stages. One of: "attention", "maxpool", "linear".
+        output_shape (tuple[int, int]): Shape of the output BEV feature map.
+        conv_out_channel (int): Number of output channels.
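+
+    Example (illustrative values only; consult the model cfgs shipped with
+    OpenPCDet for real settings):
+        block_name: ['DSVTBlock']
+        set_info: [[36, 4]]          # 36 voxels per set, 4 blocks in stage 0
+        d_model: [192]
+        nhead: [8]
+        dim_feedforward: [384]
+        dropout: 0.0
+        activation: gelu
+        reduction_type: 'attention'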
+
+    '''
+    def __init__(self, model_cfg, **kwargs):
+        super().__init__()
+
+        self.model_cfg = model_cfg
+        self.input_layer = DSVTInputLayer(self.model_cfg.INPUT_LAYER)
+        block_name = self.model_cfg.block_name
+        set_info = self.model_cfg.set_info
+        d_model = self.model_cfg.d_model
+        nhead = self.model_cfg.nhead
+        dim_feedforward = self.model_cfg.dim_feedforward
+        dropout = self.model_cfg.dropout
+        activation = self.model_cfg.activation
+        self.reduction_type = self.model_cfg.get('reduction_type', 'attention')
+        # gradient checkpointing to save GPU memory
+        self.use_torch_ckpt = self.model_cfg.get('USE_CHECKPOINT', False)
+
+        # Sparse Regional Attention Blocks
+        stage_num = len(block_name)
+        for stage_id in range(stage_num):
+            num_blocks_this_stage = set_info[stage_id][-1]
+            dmodel_this_stage = d_model[stage_id]
+            dfeed_this_stage = dim_feedforward[stage_id]
+            num_head_this_stage = nhead[stage_id]
+            block_name_this_stage = block_name[stage_id]
+            block_module = _get_block_module(block_name_this_stage)
+            block_list = []
+            norm_list = []
+            for i in range(num_blocks_this_stage):
+                block_list.append(
+                    block_module(dmodel_this_stage, num_head_this_stage, dfeed_this_stage,
+                                 dropout, activation, batch_first=True)
+                )
+                norm_list.append(nn.LayerNorm(dmodel_this_stage))
+            self.__setattr__(f'stage_{stage_id}', nn.ModuleList(block_list))
+            self.__setattr__(f'residual_norm_stage_{stage_id}', nn.ModuleList(norm_list))
+
+            # apply pooling except after the last stage
+            if stage_id < stage_num - 1:
+                downsample_window = self.model_cfg.INPUT_LAYER.downsample_stride[stage_id]
+                dmodel_next_stage = d_model[stage_id + 1]
+                pool_volume = torch.IntTensor(downsample_window).prod().item()
+                if self.reduction_type == 'linear':
+                    cat_feat_dim = dmodel_this_stage * torch.IntTensor(downsample_window).prod().item()
+                    self.__setattr__(f'stage_{stage_id}_reduction', Stage_Reduction_Block(cat_feat_dim, dmodel_next_stage))
+                elif self.reduction_type == 'maxpool':
+                    self.__setattr__(f'stage_{stage_id}_reduction', torch.nn.MaxPool1d(pool_volume))
+                elif self.reduction_type == 'attention':
+                    self.__setattr__(f'stage_{stage_id}_reduction', Stage_ReductionAtt_Block(dmodel_this_stage, pool_volume))
+                else:
+                    raise NotImplementedError
+
+        self.num_shifts = [2] * stage_num
+        self.output_shape = self.model_cfg.output_shape
+        self.stage_num = stage_num
+        self.set_info = set_info
+        self.num_point_features = self.model_cfg.conv_out_channel
+
+        self._reset_parameters()
+
+    def forward(self, batch_dict):
+        '''
+        Args:
+            batch_dict (dict):
+                The dict contains the following keys
+                - voxel_features (Tensor[float]): Voxel features after VFE. Shape of (N, d_model[0]),
+                    where N is the number of input voxels.
+                - voxel_coords (Tensor[int]): Shape of (N, 4), corresponding voxel coordinates of each voxel.
+                    Each row is (batch_id, z, y, x).
+                - ...
+
+        Returns:
+            batch_dict (dict):
+                The dict contains the following keys
+                - pillar_features (Tensor[float]):
+                - voxel_coords (Tensor[int]):
+                - ...
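+
+                Note: the returned voxel_coords come from the final stage
+                (voxel_coors_stage{stage_num - 1}), i.e. after all inter-stage
+                pooling, so they are coarser than the input coordinates.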
+        '''
+        voxel_info = self.input_layer(batch_dict)
+
+        voxel_feat = voxel_info['voxel_feats_stage0']
+        set_voxel_inds_list = [[voxel_info[f'set_voxel_inds_stage{s}_shift{i}'] for i in range(self.num_shifts[s])] for s in range(self.stage_num)]
+        set_voxel_masks_list = [[voxel_info[f'set_voxel_mask_stage{s}_shift{i}'] for i in range(self.num_shifts[s])] for s in range(self.stage_num)]
+        pos_embed_list = [[[voxel_info[f'pos_embed_stage{s}_block{b}_shift{i}'] for i in range(self.num_shifts[s])] for b in range(self.set_info[s][1])] for s in range(self.stage_num)]
+        pooling_mapping_index = [voxel_info[f'pooling_mapping_index_stage{s + 1}'] for s in range(self.stage_num - 1)]
+        pooling_index_in_pool = [voxel_info[f'pooling_index_in_pool_stage{s + 1}'] for s in range(self.stage_num - 1)]
+        pooling_preholder_feats = [voxel_info[f'pooling_preholder_feats_stage{s + 1}'] for s in range(self.stage_num - 1)]
+
+        output = voxel_feat
+        block_id = 0
+        for stage_id in range(self.stage_num):
+            block_layers = self.__getattr__(f'stage_{stage_id}')
+            residual_norm_layers = self.__getattr__(f'residual_norm_stage_{stage_id}')
+            for i in range(len(block_layers)):
+                block = block_layers[i]
+                residual = output.clone()
+                if not self.use_torch_ckpt:
+                    output = block(output, set_voxel_inds_list[stage_id], set_voxel_masks_list[stage_id], pos_embed_list[stage_id][i],
+                                   block_id=block_id)
+                else:
+                    output = checkpoint(block, output, set_voxel_inds_list[stage_id], set_voxel_masks_list[stage_id], pos_embed_list[stage_id][i], block_id)
+                output = residual_norm_layers[i](output + residual)
+                block_id += 1
+            if stage_id < self.stage_num - 1:
+                # pooling
+                prepool_features = pooling_preholder_feats[stage_id].type_as(output)
+                pooled_voxel_num = prepool_features.shape[0]
+                pool_volume = prepool_features.shape[1]
+                prepool_features[pooling_mapping_index[stage_id], pooling_index_in_pool[stage_id]] = output
+                prepool_features = prepool_features.view(prepool_features.shape[0], -1)
+
+                if self.reduction_type == 'linear':
+                    output = self.__getattr__(f'stage_{stage_id}_reduction')(prepool_features)
+                elif self.reduction_type == 'maxpool':
+                    prepool_features = prepool_features.view(pooled_voxel_num, pool_volume, -1).permute(0, 2, 1)
+                    output = self.__getattr__(f'stage_{stage_id}_reduction')(prepool_features).squeeze(-1)
+                elif self.reduction_type == 'attention':
+                    prepool_features = prepool_features.view(pooled_voxel_num, pool_volume, -1).permute(0, 2, 1)
+                    key_padding_mask = torch.zeros((pooled_voxel_num, pool_volume)).to(prepool_features.device).int()
+                    output = self.__getattr__(f'stage_{stage_id}_reduction')(prepool_features, key_padding_mask)
+                else:
+                    raise NotImplementedError
+
+        batch_dict['pillar_features'] = batch_dict['voxel_features'] = output
+        batch_dict['voxel_coords'] = voxel_info[f'voxel_coors_stage{self.stage_num - 1}']
+        return batch_dict
+
+    def _reset_parameters(self):
+        for name, p in self.named_parameters():
+            if p.dim() > 1 and 'scaler' not in name:
+                nn.init.xavier_uniform_(p)
+
+
+class DSVTBlock(nn.Module):
+    '''Consists of two encoder layers: shift and shift back.
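+    Each call runs both encoder layers in turn; the parity of block_id selects
+    the shifted window partition, and each layer attends over a different set
+    partition (see forward below).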
+    '''
+    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+                 activation="relu", batch_first=True):
+        super().__init__()
+
+        encoder_1 = DSVT_EncoderLayer(d_model, nhead, dim_feedforward, dropout,
+                                      activation, batch_first)
+        encoder_2 = DSVT_EncoderLayer(d_model, nhead, dim_feedforward, dropout,
+                                      activation, batch_first)
+        self.encoder_list = nn.ModuleList([encoder_1, encoder_2])
+
+    def forward(
+        self,
+        src,
+        set_voxel_inds_list,
+        set_voxel_masks_list,
+        pos_embed_list,
+        block_id,
+    ):
+        num_shifts = 2
+        output = src
+        # TODO: bug to be fixed, mismatch of pos_embed
+        for i in range(num_shifts):
+            set_id = i
+            shift_id = block_id % 2
+            pos_embed_id = i
+            set_voxel_inds = set_voxel_inds_list[shift_id][set_id]
+            set_voxel_masks = set_voxel_masks_list[shift_id][set_id]
+            pos_embed = pos_embed_list[pos_embed_id]
+            layer = self.encoder_list[i]
+            output = layer(output, set_voxel_inds, set_voxel_masks, pos_embed)
+
+        return output
+
+
+class DSVT_EncoderLayer(nn.Module):
+
+    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
+                 activation="relu", batch_first=True, mlp_dropout=0):
+        super().__init__()
+        self.win_attn = SetAttention(d_model, nhead, dropout, dim_feedforward, activation, batch_first, mlp_dropout)
+        self.norm = nn.LayerNorm(d_model)
+        self.d_model = d_model
+
+    def forward(self, src, set_voxel_inds, set_voxel_masks, pos=None):
+        identity = src
+        src = self.win_attn(src, pos, set_voxel_masks, set_voxel_inds)
+        src = src + identity
+        src = self.norm(src)
+
+        return src
+
+
+class SetAttention(nn.Module):
+
+    def __init__(self, d_model, nhead, dropout, dim_feedforward=2048, activation="relu", batch_first=True, mlp_dropout=0):
+        super().__init__()
+        self.nhead = nhead
+        if batch_first:
+            self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout, batch_first=batch_first)
+        else:
+            self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
+
+        # Implementation of the feedforward model
+        self.linear1 = nn.Linear(d_model, dim_feedforward)
+        self.dropout = nn.Dropout(mlp_dropout)
+        self.linear2 = nn.Linear(dim_feedforward, d_model)
+        self.d_model = d_model
+        self.norm1 = nn.LayerNorm(d_model)
+        self.norm2 = nn.LayerNorm(d_model)
+        self.dropout1 = nn.Identity()
+        self.dropout2 = nn.Identity()
+
+        self.activation = _get_activation_fn(activation)
+
+    def forward(self, src, pos=None, key_padding_mask=None, voxel_inds=None):
+        '''
+        Args:
+            src (Tensor[float]): Voxel features with shape (N, C), where N is the number of voxels.
+            pos (Tensor[float]): Position embedding vectors with shape (N, C).
+            key_padding_mask (Tensor[bool]): Mask for redundant voxels within a set. Shape of (set_num, set_size).
+            voxel_inds (Tensor[int]): Voxel indices for each set. Shape of (set_num, set_size).
+        Returns:
+            src (Tensor[float]): Voxel features.
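+
+            Internally, src is gathered into (set_num, set_size, C) sets for
+            attention and mapped back to (N, C) afterwards; a voxel that appears
+            in several sets keeps a single output.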
+ '''
+ set_features = src[voxel_inds]
+ if pos is not None:
+ set_pos = pos[voxel_inds]
+ query = set_features + set_pos
+ key = set_features + set_pos
+ value = set_features
+ else:
+ # fall back to plain attention when no positional embedding is provided
+ # (the original code left query/key/value undefined in this branch)
+ query = key = value = set_features
+
+ if key_padding_mask is not None:
+ src2 = self.self_attn(query, key, value, key_padding_mask)[0]
+ else:
+ src2 = self.self_attn(query, key, value)[0]
+
+ # map voxel features from set space to voxel space: (set_num, set_size, C) --> (N, C)
+ flatten_inds = voxel_inds.reshape(-1)
+ unique_flatten_inds, inverse = torch.unique(flatten_inds, return_inverse=True)
+ perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device)
+ inverse, perm = inverse.flip([0]), perm.flip([0])
+ perm = inverse.new_empty(unique_flatten_inds.size(0)).scatter_(0, inverse, perm)
+ src2 = src2.reshape(-1, self.d_model)[perm]
+
+ # FFN layer
+ src = src + self.dropout1(src2)
+ src = self.norm1(src)
+ src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
+ src = src + self.dropout2(src2)
+ src = self.norm2(src)
+
+ return src
+
+
+class Stage_Reduction_Block(nn.Module):
+ def __init__(self, input_channel, output_channel):
+ super().__init__()
+ self.linear1 = nn.Linear(input_channel, output_channel, bias=False)
+ self.norm = nn.LayerNorm(output_channel)
+
+ def forward(self, x):
+ src = self.norm(self.linear1(x))
+ return src
+
+
+class Stage_ReductionAtt_Block(nn.Module):
+ def __init__(self, input_channel, pool_volume):
+ super().__init__()
+ self.pool_volume = pool_volume
+ self.query_func = torch.nn.MaxPool1d(pool_volume)
+ self.norm = nn.LayerNorm(input_channel)
+ self.self_attn = nn.MultiheadAttention(input_channel, 8, batch_first=True)
+ self.pos_embedding = nn.Parameter(torch.randn(pool_volume, input_channel))
+ nn.init.normal_(self.pos_embedding, std=.01)
+
+ def forward(self, x, key_padding_mask):
+ # x: [voxel_num, c_dim, pool_volume]
+ src = self.query_func(x).permute(0, 2, 1) # voxel_num, 1, c_dim
+ key = value = x.permute(0, 2, 1)
+ key = key + self.pos_embedding.unsqueeze(0).repeat(src.shape[0], 1, 1)
+ query = src.clone()
+ output = self.self_attn(query, key, value, key_padding_mask)[0]
+ src = self.norm(output + src).squeeze(1)
+ return src
+
+
+def _get_activation_fn(activation):
+ """Return an activation function given a string"""
+ if activation == "relu":
+ return torch.nn.functional.relu
+ if activation == "gelu":
+ return torch.nn.functional.gelu
+ if activation == "glu":
+ return torch.nn.functional.glu
+ raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
+
+
+def _get_block_module(name):
+ """Return a block module given a string"""
+ if name == "DSVTBlock":
+ return DSVTBlock
+ raise RuntimeError(f"Block {name} does not exist.")
+
+
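+# A hedged illustration of a DSVTInputLayer config (values invented for exposition, not taken
+# from any shipped yaml): a two-stage pillar setup might use sparse_shape=(468, 468, 1),
+# window_shape=[[12, 12, 1], [12, 12, 1]], downsample_stride=[[2, 2, 1]], d_model=[192, 192],
+# set_info=[[36, 2], [36, 2]], hybrid_factor=[2, 2, 1], shifts_list with a zero shift and a
+# half-window shift per stage, and normalize_pos=False.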
+class DSVTInputLayer(nn.Module):
+ '''
+ This class converts the output of the VFE into DSVT input.
+ In this class we:
+ 1. Window partition: partition voxels into non-overlapping windows.
+ 2. Set partition: generate non-overlapping and size-equivalent local sets within each window.
+ 3. Pre-compute the downsample information between two consecutive stages.
+ 4. Pre-compute the position embedding vectors.
+
+ Args:
+ sparse_shape (tuple[int, int, int]): Shape of input space (xdim, ydim, zdim).
+ window_shape (list[list[int, int, int]]): Window shapes (winx, winy, winz) in different stages. Length: stage_num.
+ downsample_stride (list[list[int, int, int]]): Downsample strides between two consecutive stages.
+ Element i is [ds_x, ds_y, ds_z], which is used between stage_i and stage_{i+1}. Length: stage_num - 1.
+ d_model (list[int]): Number of input channels for each stage. Length: stage_num.
+ set_info (list[list[int, int]]): A list of set configs for each stage. Element i contains
+ [set_size, block_num], where set_size is the number of voxels in a set and block_num is the
+ number of blocks for stage i. Length: stage_num.
+ hybrid_factor (list[int, int, int]): Controls the window shape in different blocks.
+ e.g. for block_{0} and block_{1} in stage_0, the window shapes are [win_x, win_y, win_z] and
+ [win_x * h[0], win_y * h[1], win_z * h[2]] respectively.
+ shifts_list (list): Window shifts. Length: stage_num.
+ normalize_pos (bool): Whether to normalize coordinates in the position embedding.
+ '''
+ def __init__(self, model_cfg):
+ super().__init__()
+
+ self.model_cfg = model_cfg
+ self.sparse_shape = self.model_cfg.sparse_shape
+ self.window_shape = self.model_cfg.window_shape
+ self.downsample_stride = self.model_cfg.downsample_stride
+ self.d_model = self.model_cfg.d_model
+ self.set_info = self.model_cfg.set_info
+ self.stage_num = len(self.d_model)
+
+ self.hybrid_factor = self.model_cfg.hybrid_factor
+ # pair every base window shape with its hybrid (enlarged) variant
+ self.window_shape = [[self.window_shape[s_id], [self.window_shape[s_id][coord_id] * self.hybrid_factor[coord_id] \
+ for coord_id in range(3)]] for s_id in range(self.stage_num)]
+ self.shift_list = self.model_cfg.shifts_list
+ self.normalize_pos = self.model_cfg.normalize_pos
+
+ self.num_shifts = [2, ] * len(self.window_shape)
+
+ self.sparse_shape_list = [self.sparse_shape]
+ # compute sparse shapes for each stage
+ for ds_stride in self.downsample_stride:
+ last_sparse_shape = self.sparse_shape_list[-1]
+ self.sparse_shape_list.append((ceil(last_sparse_shape[0]/ds_stride[0]), ceil(last_sparse_shape[1]/ds_stride[1]), ceil(last_sparse_shape[2]/ds_stride[2])))
+
+ # position embedding layers
+ self.posembed_layers = nn.ModuleList()
+ for i in range(len(self.set_info)):
+ input_dim = 3 if self.sparse_shape_list[i][-1] > 1 else 2
+ stage_posembed_layers = nn.ModuleList()
+ for j in range(self.set_info[i][1]):
+ block_posembed_layers = nn.ModuleList()
+ for s in range(self.num_shifts[i]):
+ block_posembed_layers.append(PositionEmbeddingLearned(input_dim, self.d_model[i]))
+ stage_posembed_layers.append(block_posembed_layers)
+ self.posembed_layers.append(stage_posembed_layers)
+
+ def forward(self, batch_dict):
+ '''
+ Args:
+ batch_dict (dict):
+ The dict contains the following keys
+ - voxel_features (Tensor[float]): Voxel features after VFE with shape (N, d_model[0]),
+ where N is the number of input voxels.
+ - voxel_coords (Tensor[int]): Shape of (N, 4), corresponding voxel coordinates of each voxel.
+ Each row is (batch_id, z, y, x).
+ - ...
+
+ Returns:
+ voxel_info (dict):
+ The dict contains the following keys
+ - voxel_coors_stage{i} (Tensor[int]): Shape of (N_i, 4). N_i is the number of voxels in stage_i.
+ Each row is (batch_id, z, y, x).
+ - set_voxel_inds_stage{i}_shift{j} (Tensor[int]): Set partition index with shape (2, set_num, set_info[i][0]).
+ 2 indicates x-axis partition and y-axis partition.
+ - set_voxel_mask_stage{i}_shift{j} (Tensor[bool]): Key mask used in set attention with shape (2, set_num, set_info[i][0]).
+ - pos_embed_stage{i}_block{b}_shift{j} (Tensor[float]): Position embedding vectors with shape (N_i, d_model[i]). N_i is the
+ number of remaining voxels in stage_i.
+ - pooling_mapping_index_stage{i} (Tensor[int]): Pooling region index used in the pooling operation between stage_{i-1} and stage_{i}
+ with shape (N_{i-1}).
+ - pooling_index_in_pool_stage{i} (Tensor[int]): Index within the pooling region with shape (N_{i-1}). Combined with pooling_mapping_index_stage{i},
+ we can map each voxel in stage_{i-1} to pooling_preholder_feats_stage{i}, which is the input of the downsample operation.
+ - pooling_preholder_feats_stage{i} (Tensor[float]): Preholder features initialized with value 0.
+ Shape of (N_{i}, downsample_stride[i-1].prod(), d_model[i-1]), where prod() returns the product of all elements.
+ - ...
+ '''
+ voxel_feats = batch_dict['voxel_features']
+ voxel_coors = batch_dict['voxel_coords'].long()
+
+ voxel_info = {}
+ voxel_info['voxel_feats_stage0'] = voxel_feats.clone()
+ voxel_info['voxel_coors_stage0'] = voxel_coors.clone()
+
+ for stage_id in range(self.stage_num):
+ # window partition of the corresponding stage-map
+ voxel_info = self.window_partition(voxel_info, stage_id)
+ # generate set ids of the corresponding stage-map
+ voxel_info = self.get_set(voxel_info, stage_id)
+ for block_id in range(self.set_info[stage_id][1]):
+ for shift_id in range(self.num_shifts[stage_id]):
+ voxel_info[f'pos_embed_stage{stage_id}_block{block_id}_shift{shift_id}'] = \
+ self.get_pos_embed(voxel_info[f'coors_in_win_stage{stage_id}_shift{shift_id}'], stage_id, block_id, shift_id)
+
+ # compute pooling information
+ if stage_id < self.stage_num - 1:
+ voxel_info = self.subm_pooling(voxel_info, stage_id)
+
+ return voxel_info
+
+ @torch.no_grad()
+ def subm_pooling(self, voxel_info, stage_id):
+ # x, y, z stride
+ cur_stage_downsample = self.downsample_stride[stage_id]
+ # batch_win_coords is from 1 of x, y
+ batch_win_inds, _, index_in_win, batch_win_coors = get_pooling_index(voxel_info[f'voxel_coors_stage{stage_id}'], self.sparse_shape_list[stage_id], cur_stage_downsample)
+ # compute pooling mapping index
+ unique_batch_win_inds, contiguous_batch_win_inds = torch.unique(batch_win_inds, return_inverse=True)
+ voxel_info[f'pooling_mapping_index_stage{stage_id+1}'] = contiguous_batch_win_inds
+
+ # generate empty placeholder features
+ placeholder_prepool_feats = voxel_info[f'voxel_feats_stage0'].new_zeros((len(unique_batch_win_inds),
+ torch.prod(torch.IntTensor(cur_stage_downsample)).item(), self.d_model[stage_id]))
+ voxel_info[f'pooling_index_in_pool_stage{stage_id+1}'] = index_in_win
+ voxel_info[f'pooling_preholder_feats_stage{stage_id+1}'] = placeholder_prepool_feats
+
+ # compute pooling coordinates
+ unique, inverse = unique_batch_win_inds.clone(), contiguous_batch_win_inds.clone()
+ perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device)
+ inverse, perm = inverse.flip([0]), perm.flip([0])
+ perm = inverse.new_empty(unique.size(0)).scatter_(0, inverse, perm)
+ pool_coors = batch_win_coors[perm]
+
+ voxel_info[f'voxel_coors_stage{stage_id+1}'] = pool_coors
+
+ return voxel_info
+
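+ # Editorial note: the flip + scatter_ idiom above keeps, for every unique pooled window, the
+ # index of its first occurrence in the original ordering; the same trick is used in
+ # SetAttention.forward to undo the set partition.
+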
+ def get_set(self, voxel_info, stage_id):
+ '''
+ This is one of the core operations of DSVT.
+ Given the voxels' window ids and their relative coords inside each window, we partition them into window-bounded and size-equivalent local sets.
+ To make it clear and easy to follow, we do not use a loop to process the two shifts.
+ Args:
+ voxel_info (dict):
+ The dict contains the following keys
+ - batch_win_inds_stage{i}_shift{j} (Tensor[float]): Window indices of each voxel with shape (N), computed by 'window_partition'.
+ - coors_in_win_stage{i}_shift{j} (Tensor[int]): Relative coords inside the window of each voxel with shape (N, 3), computed by 'window_partition'.
+ Each row is (z, y, x).
+ - ...
+
+ Returns:
+ See the 'forward' function.
+ '''
+ batch_win_inds_shift0 = voxel_info[f'batch_win_inds_stage{stage_id}_shift0']
+ coors_in_win_shift0 = voxel_info[f'coors_in_win_stage{stage_id}_shift0']
+ set_voxel_inds_shift0 = self.get_set_single_shift(batch_win_inds_shift0, stage_id, shift_id=0, coors_in_win=coors_in_win_shift0)
+ voxel_info[f'set_voxel_inds_stage{stage_id}_shift0'] = set_voxel_inds_shift0
+ # compute key masks; voxel duplication must happen contiguously
+ prefix_set_voxel_inds_s0 = torch.roll(set_voxel_inds_shift0.clone(), shifts=1, dims=-1)
+ prefix_set_voxel_inds_s0[:, :, 0] = -1
+ set_voxel_mask_s0 = (set_voxel_inds_shift0 == prefix_set_voxel_inds_s0)
+ voxel_info[f'set_voxel_mask_stage{stage_id}_shift0'] = set_voxel_mask_s0
+
+ batch_win_inds_shift1 = voxel_info[f'batch_win_inds_stage{stage_id}_shift1']
+ coors_in_win_shift1 = voxel_info[f'coors_in_win_stage{stage_id}_shift1']
+ set_voxel_inds_shift1 = self.get_set_single_shift(batch_win_inds_shift1, stage_id, shift_id=1, coors_in_win=coors_in_win_shift1)
+ voxel_info[f'set_voxel_inds_stage{stage_id}_shift1'] = set_voxel_inds_shift1
+ # compute key masks; voxel duplication must happen contiguously
+ prefix_set_voxel_inds_s1 = torch.roll(set_voxel_inds_shift1.clone(), shifts=1, dims=-1)
+ prefix_set_voxel_inds_s1[:, :, 0] = -1
+ set_voxel_mask_s1 = (set_voxel_inds_shift1 == prefix_set_voxel_inds_s1)
+ voxel_info[f'set_voxel_mask_stage{stage_id}_shift1'] = set_voxel_mask_s1
+
+ return voxel_info
+
+ def get_set_single_shift(self, batch_win_inds, stage_id, shift_id=None, coors_in_win=None):
+ device = batch_win_inds.device
+ # the number of voxels assigned to a set
+ voxel_num_set = self.set_info[stage_id][0]
+ # max number of voxels in a window
+ max_voxel = self.window_shape[stage_id][shift_id][0] * self.window_shape[stage_id][shift_id][1] * self.window_shape[stage_id][shift_id][2]
+ # get contiguous (unique-relabelled) window indices
+ contiguous_win_inds = torch.unique(batch_win_inds, return_inverse=True)[1]
+ voxelnum_per_win = torch.bincount(contiguous_win_inds)
+ win_num = voxelnum_per_win.shape[0]
+ setnum_per_win_float = voxelnum_per_win / voxel_num_set
+ setnum_per_win = torch.ceil(setnum_per_win_float).long()
+ set_win_inds, set_inds_in_win = get_continous_inds(setnum_per_win)
+
+ # computation of Eq.3 in 'DSVT: Dynamic Sparse Voxel Transformer with Rotated Sets' - https://arxiv.org/abs/2301.06051:
+ # for each window, we can get the voxel indices belonging to its different sets.
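+ # Hedged worked example (numbers invented): with voxel_num_set = 3 and a window holding 7
+ # voxels, setnum_per_win = ceil(7/3) = 3, and the selected in-window slots per set are
+ # floor([0,1,2]*7/9) = [0,0,1], floor([3,4,5]*7/9) = [2,3,3] and floor([6,7,8]*7/9) = [4,5,6]:
+ # every voxel is covered and duplicates (0,0 / 3,3) sit next to each other, which is exactly
+ # what the key masks computed in get_set rely on.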
+ offset_idx = set_inds_in_win[:, None].repeat(1, voxel_num_set) * voxel_num_set
+ base_idx = torch.arange(0, voxel_num_set, 1, device=device)
+ base_select_idx = offset_idx + base_idx
+ base_select_idx = base_select_idx * voxelnum_per_win[set_win_inds][:, None]
+ base_select_idx = base_select_idx.double() / (setnum_per_win[set_win_inds] * voxel_num_set)[:, None].double()
+ base_select_idx = torch.floor(base_select_idx)
+ # obtain unique indices in the whole space
+ select_idx = base_select_idx
+ select_idx = select_idx + set_win_inds.view(-1, 1) * max_voxel
+
+ # this function returns the unordered inner-window indices of each voxel
+ inner_voxel_inds = get_inner_win_inds_cuda(contiguous_win_inds)
+ global_voxel_inds = contiguous_win_inds * max_voxel + inner_voxel_inds
+ _, order1 = torch.sort(global_voxel_inds)
+
+ # get y-axis partition results
+ global_voxel_inds_sorty = contiguous_win_inds * max_voxel + \
+ coors_in_win[:, 1] * self.window_shape[stage_id][shift_id][0] * self.window_shape[stage_id][shift_id][2] + \
+ coors_in_win[:, 2] * self.window_shape[stage_id][shift_id][2] + \
+ coors_in_win[:, 0]
+ _, order2 = torch.sort(global_voxel_inds_sorty)
+ inner_voxel_inds_sorty = -torch.ones_like(inner_voxel_inds)
+ inner_voxel_inds_sorty.scatter_(dim=0, index=order2, src=inner_voxel_inds[order1]) # get the y-axis ordered inner-window indices of each voxel
+ voxel_inds_in_batch_sorty = inner_voxel_inds_sorty + max_voxel * contiguous_win_inds
+ voxel_inds_padding_sorty = -1 * torch.ones((win_num * max_voxel), dtype=torch.long, device=device)
+ voxel_inds_padding_sorty[voxel_inds_in_batch_sorty] = torch.arange(0, voxel_inds_in_batch_sorty.shape[0], dtype=torch.long, device=device)
+ set_voxel_inds_sorty = voxel_inds_padding_sorty[select_idx.long()]
+
+ # get x-axis partition results
+ global_voxel_inds_sortx = contiguous_win_inds * max_voxel + \
+ coors_in_win[:, 2] * self.window_shape[stage_id][shift_id][1] * self.window_shape[stage_id][shift_id][2] + \
+ coors_in_win[:, 1] * self.window_shape[stage_id][shift_id][2] + \
+ coors_in_win[:, 0]
+ _, order2 = torch.sort(global_voxel_inds_sortx)
+ inner_voxel_inds_sortx = -torch.ones_like(inner_voxel_inds)
+ inner_voxel_inds_sortx.scatter_(dim=0, index=order2, src=inner_voxel_inds[order1]) # get the x-axis ordered inner-window indices of each voxel
+ voxel_inds_in_batch_sortx = inner_voxel_inds_sortx + max_voxel * contiguous_win_inds
+ voxel_inds_padding_sortx = -1 * torch.ones((win_num * max_voxel), dtype=torch.long, device=device)
+ voxel_inds_padding_sortx[voxel_inds_in_batch_sortx] = torch.arange(0, voxel_inds_in_batch_sortx.shape[0], dtype=torch.long, device=device)
+ set_voxel_inds_sortx = voxel_inds_padding_sortx[select_idx.long()]
+
+ all_set_voxel_inds = torch.stack((set_voxel_inds_sorty, set_voxel_inds_sortx), dim=0)
+ return all_set_voxel_inds
+
+ @torch.no_grad()
+ def window_partition(self, voxel_info, stage_id):
+ for i in range(2):
+ batch_win_inds, coors_in_win = get_window_coors(voxel_info[f'voxel_coors_stage{stage_id}'],
+ self.sparse_shape_list[stage_id], self.window_shape[stage_id][i], i == 1, self.shift_list[stage_id][i])
+
+ voxel_info[f'batch_win_inds_stage{stage_id}_shift{i}'] = batch_win_inds
+ voxel_info[f'coors_in_win_stage{stage_id}_shift{i}'] = coors_in_win
+
+ return voxel_info
+
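+ # Editorial note: window_partition calls get_window_coors twice per stage, once unshifted and
+ # once with the shift flag enabled, mirroring the Swin-style shifted-window scheme used by DSVT.
+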
+ def get_pos_embed(self, coors_in_win, stage_id, block_id, shift_id):
+ '''
+ Args:
+ coors_in_win: shape=[N, 3], order: z, y, x
+ '''
+ window_shape = self.window_shape[stage_id][shift_id]
+
+ embed_layer = self.posembed_layers[stage_id][block_id][shift_id]
+ if len(window_shape) == 2:
+ ndim = 2
+ win_x, win_y = window_shape
+ win_z = 0
+ elif window_shape[-1] == 1:
+ ndim = 2
+ win_x, win_y = window_shape[:2]
+ win_z = 0
+ else:
+ win_x, win_y, win_z = window_shape
+ ndim = 3
+
+ assert coors_in_win.size(1) == 3
+ z, y, x = coors_in_win[:, 0] - win_z/2, coors_in_win[:, 1] - win_y/2, coors_in_win[:, 2] - win_x/2
+
+ if self.normalize_pos:
+ x = x / win_x * 2 * 3.1415 # [-pi, pi]
+ y = y / win_y * 2 * 3.1415 # [-pi, pi]
+ if ndim == 3: # win_z is 0 in the 2D case; guard against division by zero
+ z = z / win_z * 2 * 3.1415 # [-pi, pi]
+
+ if ndim == 2:
+ location = torch.stack((x, y), dim=-1)
+ else:
+ location = torch.stack((x, y, z), dim=-1)
+ pos_embed = embed_layer(location)
+
+ return pos_embed
+
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/basic_blocks.py b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/basic_blocks.py
new file mode 100755
index 000000000..de29b4b99
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/basic_blocks.py
@@ -0,0 +1,65 @@
+import torch.nn as nn
+
+class BasicBlock1D(nn.Module):
+
+ def __init__(self, in_channels, out_channels, **kwargs):
+ """
+ Initializes 1D convolutional block
+ Args:
+ in_channels: int, Number of input channels
+ out_channels: int, Number of output channels
+ **kwargs: Dict, Extra arguments for nn.Conv1d
+ """
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.conv = nn.Conv1d(in_channels=in_channels,
+ out_channels=out_channels,
+ **kwargs)
+ self.bn = nn.BatchNorm1d(out_channels)
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, features):
+ """
+ Applies convolutional block
+ Args:
+ features: (B, C_in, L), Input features
+ Returns:
+ x: (B, C_out, L), Output features
+ """
+ x = self.conv(features)
+ x = self.bn(x)
+ x = self.relu(x)
+ return x
+
+class BasicBlock2D(nn.Module):
+
+ def __init__(self, in_channels, out_channels, **kwargs):
+ """
+ Initializes 2D convolutional block
+ Args:
+ in_channels: int, Number of input channels
+ out_channels: int, Number of output channels
+ **kwargs: Dict, Extra arguments for nn.Conv2d
+ """
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.conv = nn.Conv2d(in_channels=in_channels,
+ out_channels=out_channels,
+ **kwargs)
+ self.bn = nn.BatchNorm2d(out_channels)
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, features):
+ """
+ Applies convolutional block
+ Args:
+ features: (B, C_in, H, W), Input features
+ Returns:
+ x: (B, C_out, H, W), Output features
+ """
+ x = self.conv(features)
+ x = self.bn(x)
+ x = self.relu(x)
+ return x
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/pyramid_ffn.py b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/pyramid_ffn.py
new file mode 100755
index 000000000..29773a027
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/pyramid_ffn.py
@@ -0,0 +1,77 @@
+import torch
+import torch.nn as nn
+from .basic_blocks import BasicBlock2D
+from .sem_deeplabv3 import SemDeepLabV3
+
+class PyramidFeat2D(nn.Module):
+
+ def __init__(self, optimize, model_cfg):
+ """
+ Initialize 2D feature network via pretrained model
+ Args:
+ model_cfg: EasyDict, Dense classification network config
+ """
+ super().__init__()
+ self.model_cfg = model_cfg
+ self.is_optimize = optimize
+
+ # Create modules
+ self.ifn = SemDeepLabV3(
+ num_classes=model_cfg.num_class,
+ backbone_name=model_cfg.backbone,
+ **model_cfg.args
+ )
+ self.reduce_blocks = torch.nn.ModuleList()
+ self.out_channels = {}
+ for _idx, _channel in enumerate(model_cfg.channel_reduce["in_channels"]):
+ _channel_out = model_cfg.channel_reduce["out_channels"][_idx]
+ self.out_channels[model_cfg.args['feat_extract_layer'][_idx]] = _channel_out
+ block_cfg = {"in_channels": _channel,
+ "out_channels": _channel_out,
+ "kernel_size": model_cfg.channel_reduce["kernel_size"][_idx],
+ "stride": model_cfg.channel_reduce["stride"][_idx],
+ "bias": model_cfg.channel_reduce["bias"][_idx]}
+ self.reduce_blocks.append(BasicBlock2D(**block_cfg))
+
+ def get_output_feature_dim(self):
+ return self.out_channels
+
+ def forward(self, images):
+ """
+ Extracts 2D pyramid features from the segmentation backbone and reduces their channels
+ Args:
+ images: (N, 3, H_in, W_in), Input images
+ Returns:
+ batch_dict:
+ {layer}_feat2d: (N, C_out, H_out, W_out), Channel-reduced image features per extraction layer
+ """
+ # Run the 2D segmentation network
+ batch_dict = {}
+ ifn_result = self.ifn(images)
+
+ for _idx, _layer in enumerate(self.model_cfg.args['feat_extract_layer']):
+ image_features = ifn_result[_layer]
+ # Channel reduce
+ if self.reduce_blocks[_idx] is not None:
+ image_features = self.reduce_blocks[_idx](image_features)
+
+ batch_dict[_layer+"_feat2d"] = image_features
+
+ if self.training:
+ # detach features from the graph if the 2D network is not optimized
+ if "logits" in ifn_result:
+ ifn_result["logits"].detach_()
+ if not self.is_optimize:
+ image_features.detach_()
+
+ return batch_dict
+
+ def get_loss(self):
+ """
+ Gets loss (this module computes no extra loss)
+ Args:
+ Returns:
+ loss: None
+ tb_dict: None
+ """
+ return None, None
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/sem_deeplabv3.py b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/sem_deeplabv3.py
new file mode 100755
index 000000000..9abf89818
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/SemanticSeg/sem_deeplabv3.py
@@ -0,0 +1,160 @@
+from collections import OrderedDict
+from pathlib import Path
+from torch import hub
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torchvision
+
+
+class SegTemplate(nn.Module):
+ def __init__(self, constructor, feat_extract_layer, num_classes, pretrained_path=None, aux_loss=None):
+ """
+ Initializes the semantic segmentation network template.
+ Args: + constructor: function, Model constructor + feat_extract_layer: string, Layer to extract features from + num_classes: int, Number of classes + pretrained_path: string, (Optional) Path of the model to load weights from + aux_loss: bool, Flag to include auxillary loss + """ + super().__init__() + self.num_classes = num_classes + self.pretrained_path = pretrained_path + self.pretrained = pretrained_path is not None + self.aux_loss = aux_loss + + if self.pretrained: + # Preprocess Module + self.norm_mean = torch.Tensor([0.485, 0.456, 0.406]) + self.norm_std = torch.Tensor([0.229, 0.224, 0.225]) + + # Model + self.model = self.get_model(constructor=constructor) + self.feat_extract_layer = feat_extract_layer + + return_layers = {_layer:_layer for _layer in feat_extract_layer} + self.model.backbone.return_layers.update(return_layers) + + + def get_model(self, constructor): + """ + Get model + Args: + constructor: function, Model constructor + Returns: + model: nn.Module, Model + """ + # Get model + model = constructor(pretrained=False, + pretrained_backbone=False, + num_classes=self.num_classes, + aux_loss=self.aux_loss) + # Update weights + if self.pretrained_path is not None: + model_dict = model.state_dict() + + # Download pretrained model if not available yet + checkpoint_path = Path(self.pretrained_path) + if not checkpoint_path.exists(): + checkpoint = checkpoint_path.name + save_dir = checkpoint_path.parent + save_dir.mkdir(parents=True, exist_ok=True) + url = f'https://download.pytorch.org/models/{checkpoint}' + hub.load_state_dict_from_url(url, save_dir) + + # Get pretrained state dict + pretrained_dict = torch.load(self.pretrained_path) + #pretrained_dict = self.filter_pretrained_dict(model_dict=model_dict, pretrained_dict=pretrained_dict) + + # Update current model state dict + model_dict.update(pretrained_dict) + model.load_state_dict(model_dict, strict=False) + + return model.cuda() + + def filter_pretrained_dict(self, model_dict, pretrained_dict): + """ + Removes layers from pretrained state dict that are not used or changed in model + Args: + model_dict: dict, Default model state dictionary + pretrained_dict: dict, Pretrained model state dictionary + Returns: + pretrained_dict: dict, Pretrained model state dictionary with removed weights + """ + # Removes aux classifier weights if not used + if "aux_classifier.0.weight" in pretrained_dict and "aux_classifier.0.weight" not in model_dict: + pretrained_dict = {key: value for key, value in pretrained_dict.items() + if "aux_classifier" not in key} + + # Removes final conv layer from weights if number of classes are different + model_num_classes = model_dict["classifier.4.weight"].shape[0] + pretrained_num_classes = pretrained_dict["classifier.4.weight"].shape[0] + if model_num_classes != pretrained_num_classes: + pretrained_dict.pop("classifier.4.weight") + pretrained_dict.pop("classifier.4.bias") + + return pretrained_dict + + def forward(self, images): + """ + Forward pass + Args: + images: (N, 3, H_in, W_in), Input images + Returns + result: dict[torch.Tensor], Depth distribution result + features: (N, C, H_out, W_out), Image features + logits: (N, num_classes, H_out, W_out), Classification logits + aux: (N, num_classes, H_out, W_out), Auxillary classification logits + """ + + # Preprocess images + if self.pretrained: + images = (images - self.norm_mean[None, :, None, None].type_as(images)) / self.norm_std[None, :, None, None].type_as(images) + x = images.cuda() + + # Extract features + result = OrderedDict() + features = 
self.model.backbone(x)
+ for _layer in self.feat_extract_layer:
+ result[_layer] = features[_layer]
+ return result
+
+ # NOTE: the early return above intentionally short-circuits the classifier branch below;
+ # the remaining code in this method is kept for reference but is unreachable.
+ if 'features' in features.keys():
+ feat_shape = features['features'].shape[-2:]
+ else:
+ feat_shape = features['layer1'].shape[-2:]
+
+ # Predict classification logits
+ x = features["out"] # the classifier is commented out to reduce memory
+ # x = self.model.classifier(x)
+ # x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False)
+ result["logits"] = x
+
+ # Predict auxiliary classification logits
+ if self.model.aux_classifier is not None:
+ x = features["aux"]
+ x = self.model.aux_classifier(x)
+ x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False)
+ result["aux"] = x
+
+ return result
+
+
+class SemDeepLabV3(SegTemplate):
+
+ def __init__(self, backbone_name, **kwargs):
+ """
+ Initializes SemDeepLabV3 model
+ Args:
+ backbone_name: string, ResNet Backbone Name [ResNet50/ResNet101]
+ """
+ if backbone_name == "ResNet50":
+ constructor = torchvision.models.segmentation.deeplabv3_resnet50
+ elif backbone_name == "ResNet101":
+ constructor = torchvision.models.segmentation.deeplabv3_resnet101
+ else:
+ raise NotImplementedError
+
+ super().__init__(constructor=constructor, **kwargs)
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_conv.py b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_conv.py
new file mode 100644
index 000000000..79dfa26ba
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_conv.py
@@ -0,0 +1,224 @@
+import torch
+import torch.nn as nn
+from pcdet.utils.spconv_utils import spconv
+from pcdet.ops.roiaware_pool3d.roiaware_pool3d_utils import points_in_boxes_gpu
+from pcdet.models.backbones_3d.focal_sparse_conv.focal_sparse_utils import split_voxels, check_repeat, FocalLoss
+from pcdet.utils import common_utils
+
+
+class FocalSparseConv(spconv.SparseModule):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, voxel_stride, norm_fn=None, indice_key=None,
+ image_channel=3, kernel_size=3, padding=1, mask_multi=False, use_img=False,
+ topk=False, threshold=0.5, skip_mask_kernel=False, enlarge_voxel_channels=-1,
+ point_cloud_range=[-3, -40, 0, 1, 40, 70.4],
+ voxel_size=[0.1, 0.05, 0.05]):
+ super(FocalSparseConv, self).__init__()
+
+ self.conv = spconv.SubMConv3d(inplanes, planes, kernel_size=kernel_size, stride=1, bias=False, indice_key=indice_key)
+ self.bn1 = norm_fn(planes)
+ self.relu = nn.ReLU(True)
+ offset_channels = kernel_size**3
+
+ self.topk = topk
+ self.threshold = threshold
+ self.voxel_stride = voxel_stride
+ self.focal_loss = FocalLoss()
+ self.mask_multi = mask_multi
+ self.skip_mask_kernel = skip_mask_kernel
+ self.use_img = use_img
+
+ voxel_channel = enlarge_voxel_channels if enlarge_voxel_channels > 0 else inplanes
+ in_channels = image_channel + voxel_channel if use_img else voxel_channel
+
+ self.conv_enlarge = spconv.SparseSequential(spconv.SubMConv3d(inplanes, enlarge_voxel_channels,
+ kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key+'_enlarge'),
+ norm_fn(enlarge_voxel_channels),
+ nn.ReLU(True)) if enlarge_voxel_channels > 0 else None
+
+ self.conv_imp = spconv.SubMConv3d(in_channels, offset_channels, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key+'_imp')
+
+ _step = int(kernel_size//2)
+ kernel_offsets = [[i, j, k] for i in range(-_step, _step+1) for j in range(-_step, _step+1) for k in range(-_step, _step+1)]
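+ # kernel_offsets enumerates all kernel_size**3 integer offsets around a voxel; the centre
+ # [0, 0, 0] is removed just below because the voxel itself is always kept.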
kernel_offsets.remove([0, 0, 0]) + self.kernel_offsets = torch.Tensor(kernel_offsets).cuda() + self.inv_idx = torch.Tensor([2, 1, 0]).long().cuda() + self.point_cloud_range = torch.Tensor(point_cloud_range).cuda() + self.voxel_size = torch.Tensor(voxel_size).cuda() + + def construct_multimodal_features(self, x, x_rgb, batch_dict, fuse_sum=False): + """ + Construct the multimodal features with both lidar sparse features and image features. + Args: + x: [N, C] lidar sparse features + x_rgb: [b, c, h, w] image features + batch_dict: input and output information during forward + fuse_sum: bool, manner for fusion, True - sum, False - concat + + Return: + image_with_voxelfeatures: [N, C] fused multimodal features + """ + batch_index = x.indices[:, 0] + spatial_indices = x.indices[:, 1:] * self.voxel_stride + voxels_3d = spatial_indices * self.voxel_size + self.point_cloud_range[:3] + calibs = batch_dict['calib'] + batch_size = batch_dict['batch_size'] + h, w = batch_dict['images'].shape[2:] + + if not x_rgb.shape == batch_dict['images'].shape: + x_rgb = nn.functional.interpolate(x_rgb, (h, w), mode='bilinear') + + image_with_voxelfeatures = [] + voxels_2d_int_list = [] + filter_idx_list = [] + for b in range(batch_size): + x_rgb_batch = x_rgb[b] + + calib = calibs[b] + voxels_3d_batch = voxels_3d[batch_index==b] + voxel_features_sparse = x.features[batch_index==b] + + # Reverse the point cloud transformations to the original coords. + if 'noise_scale' in batch_dict: + voxels_3d_batch[:, :3] /= batch_dict['noise_scale'][b] + if 'noise_rot' in batch_dict: + voxels_3d_batch = common_utils.rotate_points_along_z(voxels_3d_batch[:, self.inv_idx].unsqueeze(0), -batch_dict['noise_rot'][b].unsqueeze(0))[0, :, self.inv_idx] + if 'flip_x' in batch_dict: + voxels_3d_batch[:, 1] *= -1 if batch_dict['flip_x'][b] else 1 + if 'flip_y' in batch_dict: + voxels_3d_batch[:, 2] *= -1 if batch_dict['flip_y'][b] else 1 + + voxels_2d, _ = calib.lidar_to_img(voxels_3d_batch[:, self.inv_idx].cpu().numpy()) + + voxels_2d_int = torch.Tensor(voxels_2d).to(x_rgb_batch.device).long() + + filter_idx = (0<=voxels_2d_int[:, 1]) * (voxels_2d_int[:, 1] < h) * (0<=voxels_2d_int[:, 0]) * (voxels_2d_int[:, 0] < w) + + filter_idx_list.append(filter_idx) + voxels_2d_int = voxels_2d_int[filter_idx] + voxels_2d_int_list.append(voxels_2d_int) + + image_features_batch = torch.zeros((voxel_features_sparse.shape[0], x_rgb_batch.shape[0]), device=x_rgb_batch.device) + image_features_batch[filter_idx] = x_rgb_batch[:, voxels_2d_int[:, 1], voxels_2d_int[:, 0]].permute(1, 0) + + if fuse_sum: + image_with_voxelfeature = image_features_batch + voxel_features_sparse + else: + image_with_voxelfeature = torch.cat([image_features_batch, voxel_features_sparse], dim=1) + + image_with_voxelfeatures.append(image_with_voxelfeature) + + image_with_voxelfeatures = torch.cat(image_with_voxelfeatures) + return image_with_voxelfeatures + + def _gen_sparse_features(self, x, imps_3d, batch_dict, voxels_3d): + """ + Generate the output sparse features from the focal sparse conv. 
+ Args: + x: [N, C], lidar sparse features + imps_3d: [N, kernelsize**3], the predicted importance values + batch_dict: input and output information during forward + voxels_3d: [N, 3], the 3d positions of voxel centers + """ + batch_size = x.batch_size + voxel_features_fore = [] + voxel_indices_fore = [] + voxel_features_back = [] + voxel_indices_back = [] + + box_of_pts_cls_targets = [] + mask_voxels = [] + mask_kernel_list = [] + + for b in range(batch_size): + if self.training: + index = x.indices[:, 0] + batch_index = index==b + mask_voxel = imps_3d[batch_index, -1].sigmoid() + voxels_3d_batch = voxels_3d[batch_index].unsqueeze(0) + mask_voxels.append(mask_voxel) + gt_boxes = batch_dict['gt_boxes'][b, :, :-1].unsqueeze(0) + box_of_pts_batch = points_in_boxes_gpu(voxels_3d_batch[:, :, self.inv_idx], gt_boxes).squeeze(0) + box_of_pts_cls_targets.append(box_of_pts_batch>=0) + + features_fore, indices_fore, features_back, indices_back, mask_kernel = split_voxels(x, b, imps_3d, voxels_3d, self.kernel_offsets, mask_multi=self.mask_multi, topk=self.topk, threshold=self.threshold) + + mask_kernel_list.append(mask_kernel) + voxel_features_fore.append(features_fore) + voxel_indices_fore.append(indices_fore) + voxel_features_back.append(features_back) + voxel_indices_back.append(indices_back) + + voxel_features_fore = torch.cat(voxel_features_fore, dim=0) + voxel_indices_fore = torch.cat(voxel_indices_fore, dim=0) + voxel_features_back = torch.cat(voxel_features_back, dim=0) + voxel_indices_back = torch.cat(voxel_indices_back, dim=0) + mask_kernel = torch.cat(mask_kernel_list, dim=0) + + x_fore = spconv.SparseConvTensor(voxel_features_fore, voxel_indices_fore, x.spatial_shape, x.batch_size) + x_back = spconv.SparseConvTensor(voxel_features_back, voxel_indices_back, x.spatial_shape, x.batch_size) + + loss_box_of_pts = 0 + if self.training: + mask_voxels = torch.cat(mask_voxels) + box_of_pts_cls_targets = torch.cat(box_of_pts_cls_targets) + mask_voxels_two_classes = torch.cat([1-mask_voxels.unsqueeze(-1), mask_voxels.unsqueeze(-1)], dim=1) + loss_box_of_pts = self.focal_loss(mask_voxels_two_classes, box_of_pts_cls_targets.long()) + + return x_fore, x_back, loss_box_of_pts, mask_kernel + + def combine_out(self, x_fore, x_back, remove_repeat=False): + """ + Combine the foreground and background sparse features together. + Args: + x_fore: [N1, C], foreground sparse features + x_back: [N2, C], background sparse features + remove_repeat: bool, whether to remove the spatial replicate features. 
+ """ + x_fore_features = torch.cat([x_fore.features, x_back.features], dim=0) + x_fore_indices = torch.cat([x_fore.indices, x_back.indices], dim=0) + + if remove_repeat: + index = x_fore_indices[:, 0] + features_out_list = [] + indices_coords_out_list = [] + for b in range(x_fore.batch_size): + batch_index = index==b + features_out, indices_coords_out, _ = check_repeat(x_fore_features[batch_index], x_fore_indices[batch_index], flip_first=False) + features_out_list.append(features_out) + indices_coords_out_list.append(indices_coords_out) + x_fore_features = torch.cat(features_out_list, dim=0) + x_fore_indices = torch.cat(indices_coords_out_list, dim=0) + + x_fore = x_fore.replace_feature(x_fore_features) + x_fore.indices = x_fore_indices + + return x_fore + + def forward(self, x, batch_dict, x_rgb=None): + spatial_indices = x.indices[:, 1:] * self.voxel_stride + voxels_3d = spatial_indices * self.voxel_size + self.point_cloud_range[:3] + + if self.use_img: + features_multimodal = self.construct_multimodal_features(x, x_rgb, batch_dict) + x_predict = spconv.SparseConvTensor(features_multimodal, x.indices, x.spatial_shape, x.batch_size) + else: + x_predict = self.conv_enlarge(x) if self.conv_enlarge else x + + imps_3d = self.conv_imp(x_predict).features + + x_fore, x_back, loss_box_of_pts, mask_kernel = self._gen_sparse_features(x, imps_3d, batch_dict, voxels_3d) + + if not self.skip_mask_kernel: + x_fore = x_fore.replace_feature(x_fore.features * mask_kernel.unsqueeze(-1)) + out = self.combine_out(x_fore, x_back, remove_repeat=True) + out = self.conv(out) + + if self.use_img: + out = out.replace_feature(self.construct_multimodal_features(out, x_rgb, batch_dict, True)) + + out = out.replace_feature(self.bn1(out.features)) + out = out.replace_feature(self.relu(out.features)) + + return out, batch_dict, loss_box_of_pts diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_utils.py b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_utils.py new file mode 100644 index 000000000..d2c234477 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/focal_sparse_conv/focal_sparse_utils.py @@ -0,0 +1,147 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable + + +class FocalLoss(nn.Module): + + def __init__(self, gamma=2.0, eps=1e-7): + super(FocalLoss, self).__init__() + self.gamma = gamma + self.eps = eps + + def one_hot(self, index, classes): + size = index.size() + (classes,) + view = index.size() + (1,) + + mask = torch.Tensor(*size).fill_(0).to(index.device) + + index = index.view(*view) + ones = 1. + + if isinstance(index, Variable): + ones = Variable(torch.Tensor(index.size()).fill_(1).to(index.device)) + mask = Variable(mask, volatile=index.volatile) + + return mask.scatter_(1, index, ones) + + def forward(self, input, target): + y = self.one_hot(target, input.size(-1)) + logit = F.softmax(input, dim=-1) + logit = logit.clamp(self.eps, 1. - self.eps) + + loss = -1 * y * torch.log(logit) # cross entropy + loss = loss * (1 - logit) ** self.gamma # focal loss + + return loss.mean() + +def sort_by_indices(features, indices, features_add=None): + """ + To sort the sparse features with its indices in a convenient manner. 
+def sort_by_indices(features, indices, features_add=None):
+ """
+ Sort the sparse features by their spatial indices in a convenient manner.
+ Args:
+ features: [N, C], sparse features
+ indices: [N, 4], indices of sparse features
+ features_add: [N, C], additional features to sort
+ """
+ idx = indices[:, 1:]
+ idx_sum = idx.select(1, 0) * idx[:, 1].max() * idx[:, 2].max() + idx.select(1, 1) * idx[:, 2].max() + idx.select(1, 2)
+ _, ind = idx_sum.sort()
+ features = features[ind]
+ indices = indices[ind]
+ if features_add is not None:
+ features_add = features_add[ind]
+ return features, indices, features_add
+
+def check_repeat(features, indices, features_add=None, sort_first=True, flip_first=True):
+ """
+ Check whether there are duplicate indices in the sparse features and,
+ if so, merge the duplicated features (summing them; averaging features_add).
+ """
+ if sort_first:
+ features, indices, features_add = sort_by_indices(features, indices, features_add)
+
+ if flip_first:
+ features, indices = features.flip([0]), indices.flip([0])
+
+ if features_add is not None:
+ features_add = features_add.flip([0])
+
+ idx = indices[:, 1:].int()
+ idx_sum = torch.add(torch.add(idx.select(1, 0) * idx[:, 1].max() * idx[:, 2].max(), idx.select(1, 1) * idx[:, 2].max()), idx.select(1, 2))
+ _unique, inverse, counts = torch.unique_consecutive(idx_sum, return_inverse=True, return_counts=True, dim=0)
+
+ if _unique.shape[0] < indices.shape[0]:
+ perm = torch.arange(inverse.size(0), dtype=inverse.dtype, device=inverse.device)
+ features_new = torch.zeros((_unique.shape[0], features.shape[-1]), device=features.device)
+ features_new.index_add_(0, inverse.long(), features)
+ features = features_new
+ perm_ = inverse.new_empty(_unique.size(0)).scatter_(0, inverse, perm)
+ indices = indices[perm_].int()
+
+ if features_add is not None:
+ features_add_new = torch.zeros((_unique.shape[0],), device=features_add.device)
+ features_add_new.index_add_(0, inverse.long(), features_add)
+ features_add = features_add_new / counts
+ return features, indices, features_add
+
+
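+# Hedged illustration (indices invented): for indices [[0,1,1,1], [0,1,1,1], [0,2,0,0]] with
+# features f0, f1, f2, check_repeat returns two rows: f0 + f1 for the duplicated cell (1,1,1)
+# and f2 for (2,0,0); when features_add is given, the duplicate's value is averaged over its count.
+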
+def split_voxels(x, b, imps_3d, voxels_3d, kernel_offsets, mask_multi=True, topk=True, threshold=0.5):
+ """
+ Generate and split the voxels into foreground and background sparse features, based on the predicted importance values.
+ Args:
+ x: [N, C], input sparse features
+ b: int, batch index
+ imps_3d: [N, kernelsize**3], the predicted importance values
+ voxels_3d: [N, 3], the 3d positions of voxel centers
+ kernel_offsets: [kernelsize**3 - 1, 3], the offset coords within a kernel
+ mask_multi: bool, whether to multiply the predicted mask onto the features
+ topk: bool, whether to use topk or a threshold for selection
+ threshold: float, threshold value
+ """
+ index = x.indices[:, 0]
+ batch_index = index == b
+ indices_ori = x.indices[batch_index]
+ features_ori = x.features[batch_index]
+ mask_voxel = imps_3d[batch_index, -1].sigmoid()
+ mask_kernel = imps_3d[batch_index, :-1].sigmoid()
+
+ if mask_multi:
+ features_ori *= mask_voxel.unsqueeze(-1)
+
+ if topk:
+ _, indices = mask_voxel.sort(descending=True)
+ indices_fore = indices[:int(mask_voxel.shape[0]*threshold)]
+ indices_back = indices[int(mask_voxel.shape[0]*threshold):]
+ else:
+ indices_fore = mask_voxel > threshold
+ indices_back = mask_voxel <= threshold
+
+ features_fore = features_ori[indices_fore]
+ coords_fore = indices_ori[indices_fore]
+
+ mask_kernel_fore = mask_kernel[indices_fore]
+ mask_kernel_bool = mask_kernel_fore >= threshold
+ voxel_kernels_imp = kernel_offsets.unsqueeze(0).repeat(mask_kernel_bool.shape[0], 1, 1)
+ mask_kernel_fore = mask_kernel[indices_fore][mask_kernel_bool]
+ indices_fore_kernels = coords_fore[:, 1:].unsqueeze(1).repeat(1, kernel_offsets.shape[0], 1)
+ indices_with_imp = indices_fore_kernels + voxel_kernels_imp
+ selected_indices = indices_with_imp[mask_kernel_bool]
+ spatial_indices = (selected_indices[:, 0] > 0) * (selected_indices[:, 1] > 0) * (selected_indices[:, 2] > 0) * \
+ (selected_indices[:, 0] < x.spatial_shape[0]) * (selected_indices[:, 1] < x.spatial_shape[1]) * (selected_indices[:, 2] < x.spatial_shape[2])
+ selected_indices = selected_indices[spatial_indices]
+ mask_kernel_fore = mask_kernel_fore[spatial_indices]
+ selected_indices = torch.cat([torch.ones((selected_indices.shape[0], 1), device=features_fore.device)*b, selected_indices], dim=1)
+
+ selected_features = torch.zeros((selected_indices.shape[0], features_ori.shape[1]), device=features_fore.device)
+
+ features_fore_cat = torch.cat([features_fore, selected_features], dim=0)
+ coords_fore = torch.cat([coords_fore, selected_indices], dim=0)
+ mask_kernel_fore = torch.cat([torch.ones(features_fore.shape[0], device=features_fore.device), mask_kernel_fore], dim=0)
+
+ features_fore, coords_fore, mask_kernel_fore = check_repeat(features_fore_cat, coords_fore, features_add=mask_kernel_fore)
+
+ features_back = features_ori[indices_back]
+ coords_back = indices_ori[indices_back]
+
+ return features_fore, coords_fore, features_back, coords_back, mask_kernel_fore
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/pfe/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/pfe/__init__.py
new file mode 100644
index 000000000..b65a3f59b
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/pfe/__init__.py
@@ -0,0 +1,5 @@
+from .voxel_set_abstraction import VoxelSetAbstraction
+
+__all__ = {
+ 'VoxelSetAbstraction': VoxelSetAbstraction
+}
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py b/toolbox/openpcdet/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
new file mode 100644
index 000000000..0f3b8ae93
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/pfe/voxel_set_abstraction.py
@@ -0,0 +1,411 @@
+import math
+import numpy as np
+import torch
+import torch.nn as nn
+
+from 
....ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules +from ....ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_stack_utils +from ....utils import common_utils + + +def bilinear_interpolate_torch(im, x, y): + """ + Args: + im: (H, W, C) [y, x] + x: (N) + y: (N) + + Returns: + + """ + x0 = torch.floor(x).long() + x1 = x0 + 1 + + y0 = torch.floor(y).long() + y1 = y0 + 1 + + x0 = torch.clamp(x0, 0, im.shape[1] - 1) + x1 = torch.clamp(x1, 0, im.shape[1] - 1) + y0 = torch.clamp(y0, 0, im.shape[0] - 1) + y1 = torch.clamp(y1, 0, im.shape[0] - 1) + + Ia = im[y0, x0] + Ib = im[y1, x0] + Ic = im[y0, x1] + Id = im[y1, x1] + + wa = (x1.type_as(x) - x) * (y1.type_as(y) - y) + wb = (x1.type_as(x) - x) * (y - y0.type_as(y)) + wc = (x - x0.type_as(x)) * (y1.type_as(y) - y) + wd = (x - x0.type_as(x)) * (y - y0.type_as(y)) + ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd) + return ans + + +def sample_points_with_roi(rois, points, sample_radius_with_roi, num_max_points_of_part=200000): + """ + Args: + rois: (M, 7 + C) + points: (N, 3) + sample_radius_with_roi: + num_max_points_of_part: + + Returns: + sampled_points: (N_out, 3) + """ + if points.shape[0] < num_max_points_of_part: + distance = (points[:, None, :] - rois[None, :, 0:3]).norm(dim=-1) + min_dis, min_dis_roi_idx = distance.min(dim=-1) + roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1) + point_mask = min_dis < roi_max_dim + sample_radius_with_roi + else: + start_idx = 0 + point_mask_list = [] + while start_idx < points.shape[0]: + distance = (points[start_idx:start_idx + num_max_points_of_part, None, :] - rois[None, :, 0:3]).norm(dim=-1) + min_dis, min_dis_roi_idx = distance.min(dim=-1) + roi_max_dim = (rois[min_dis_roi_idx, 3:6] / 2).norm(dim=-1) + cur_point_mask = min_dis < roi_max_dim + sample_radius_with_roi + point_mask_list.append(cur_point_mask) + start_idx += num_max_points_of_part + point_mask = torch.cat(point_mask_list, dim=0) + + sampled_points = points[:1] if point_mask.sum() == 0 else points[point_mask, :] + + return sampled_points, point_mask + + +def sector_fps(points, num_sampled_points, num_sectors): + """ + Args: + points: (N, 3) + num_sampled_points: int + num_sectors: int + + Returns: + sampled_points: (N_out, 3) + """ + sector_size = np.pi * 2 / num_sectors + point_angles = torch.atan2(points[:, 1], points[:, 0]) + np.pi + sector_idx = (point_angles / sector_size).floor().clamp(min=0, max=num_sectors) + xyz_points_list = [] + xyz_batch_cnt = [] + num_sampled_points_list = [] + for k in range(num_sectors): + mask = (sector_idx == k) + cur_num_points = mask.sum().item() + if cur_num_points > 0: + xyz_points_list.append(points[mask]) + xyz_batch_cnt.append(cur_num_points) + ratio = cur_num_points / points.shape[0] + num_sampled_points_list.append( + min(cur_num_points, math.ceil(ratio * num_sampled_points)) + ) + + if len(xyz_batch_cnt) == 0: + xyz_points_list.append(points) + xyz_batch_cnt.append(len(points)) + num_sampled_points_list.append(num_sampled_points) + print(f'Warning: empty sector points detected in SectorFPS: points.shape={points.shape}') + + xyz = torch.cat(xyz_points_list, dim=0) + xyz_batch_cnt = torch.tensor(xyz_batch_cnt, device=points.device).int() + sampled_points_batch_cnt = torch.tensor(num_sampled_points_list, device=points.device).int() + + sampled_pt_idxs = pointnet2_stack_utils.stack_farthest_point_sample( + xyz.contiguous(), xyz_batch_cnt, sampled_points_batch_cnt + ).long() 
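+ # Editorial note: stack_farthest_point_sample runs farthest point sampling independently per
+ # sector here (the batch-count tensors mark sector boundaries), so the keypoint budget is
+ # spread proportionally over all occupied azimuth sectors.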
+ + sampled_points = xyz[sampled_pt_idxs] + + return sampled_points + + +class VoxelSetAbstraction(nn.Module): + def __init__(self, model_cfg, voxel_size, point_cloud_range, num_bev_features=None, + num_rawpoint_features=None, **kwargs): + super().__init__() + self.model_cfg = model_cfg + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + + SA_cfg = self.model_cfg.SA_LAYER + + self.SA_layers = nn.ModuleList() + self.SA_layer_names = [] + self.downsample_times_map = {} + c_in = 0 + for src_name in self.model_cfg.FEATURES_SOURCE: + if src_name in ['bev', 'raw_points']: + continue + self.downsample_times_map[src_name] = SA_cfg[src_name].DOWNSAMPLE_FACTOR + + if SA_cfg[src_name].get('INPUT_CHANNELS', None) is None: + input_channels = SA_cfg[src_name].MLPS[0][0] \ + if isinstance(SA_cfg[src_name].MLPS[0], list) else SA_cfg[src_name].MLPS[0] + else: + input_channels = SA_cfg[src_name]['INPUT_CHANNELS'] + + cur_layer, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module( + input_channels=input_channels, config=SA_cfg[src_name] + ) + self.SA_layers.append(cur_layer) + self.SA_layer_names.append(src_name) + + c_in += cur_num_c_out + + if 'bev' in self.model_cfg.FEATURES_SOURCE: + c_bev = num_bev_features + c_in += c_bev + + if 'raw_points' in self.model_cfg.FEATURES_SOURCE: + self.SA_rawpoints, cur_num_c_out = pointnet2_stack_modules.build_local_aggregation_module( + input_channels=num_rawpoint_features - 3, config=SA_cfg['raw_points'] + ) + + c_in += cur_num_c_out + + self.vsa_point_feature_fusion = nn.Sequential( + nn.Linear(c_in, self.model_cfg.NUM_OUTPUT_FEATURES, bias=False), + nn.BatchNorm1d(self.model_cfg.NUM_OUTPUT_FEATURES), + nn.ReLU(), + ) + self.num_point_features = self.model_cfg.NUM_OUTPUT_FEATURES + self.num_point_features_before_fusion = c_in + + def interpolate_from_bev_features(self, keypoints, bev_features, batch_size, bev_stride): + """ + Args: + keypoints: (N1 + N2 + ..., 4) + bev_features: (B, C, H, W) + batch_size: + bev_stride: + + Returns: + point_bev_features: (N1 + N2 + ..., C) + """ + x_idxs = (keypoints[:, 1] - self.point_cloud_range[0]) / self.voxel_size[0] + y_idxs = (keypoints[:, 2] - self.point_cloud_range[1]) / self.voxel_size[1] + + x_idxs = x_idxs / bev_stride + y_idxs = y_idxs / bev_stride + + point_bev_features_list = [] + for k in range(batch_size): + bs_mask = (keypoints[:, 0] == k) + + cur_x_idxs = x_idxs[bs_mask] + cur_y_idxs = y_idxs[bs_mask] + cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C) + point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs) + point_bev_features_list.append(point_bev_features) + + point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C) + return point_bev_features + + def sectorized_proposal_centric_sampling(self, roi_boxes, points): + """ + Args: + roi_boxes: (M, 7 + C) + points: (N, 3) + + Returns: + sampled_points: (N_out, 3) + """ + + sampled_points, _ = sample_points_with_roi( + rois=roi_boxes, points=points, + sample_radius_with_roi=self.model_cfg.SPC_SAMPLING.SAMPLE_RADIUS_WITH_ROI, + num_max_points_of_part=self.model_cfg.SPC_SAMPLING.get('NUM_POINTS_OF_EACH_SAMPLE_PART', 200000) + ) + sampled_points = sector_fps( + points=sampled_points, num_sampled_points=self.model_cfg.NUM_KEYPOINTS, + num_sectors=self.model_cfg.SPC_SAMPLING.NUM_SECTORS + ) + return sampled_points + + def get_sampled_points(self, batch_dict): + """ + Args: + batch_dict: + + Returns: + keypoints: (N1 + N2 + ..., 4), where 4 indicates 
[bs_idx, x, y, z] + """ + batch_size = batch_dict['batch_size'] + if self.model_cfg.POINT_SOURCE == 'raw_points': + src_points = batch_dict['points'][:, 1:4] + batch_indices = batch_dict['points'][:, 0].long() + elif self.model_cfg.POINT_SOURCE == 'voxel_centers': + src_points = common_utils.get_voxel_centers( + batch_dict['voxel_coords'][:, 1:4], + downsample_times=1, + voxel_size=self.voxel_size, + point_cloud_range=self.point_cloud_range + ) + batch_indices = batch_dict['voxel_coords'][:, 0].long() + else: + raise NotImplementedError + keypoints_list = [] + for bs_idx in range(batch_size): + bs_mask = (batch_indices == bs_idx) + sampled_points = src_points[bs_mask].unsqueeze(dim=0) # (1, N, 3) + if self.model_cfg.SAMPLE_METHOD == 'FPS': + cur_pt_idxs = pointnet2_stack_utils.farthest_point_sample( + sampled_points[:, :, 0:3].contiguous(), self.model_cfg.NUM_KEYPOINTS + ).long() + + if sampled_points.shape[1] < self.model_cfg.NUM_KEYPOINTS: + times = int(self.model_cfg.NUM_KEYPOINTS / sampled_points.shape[1]) + 1 + non_empty = cur_pt_idxs[0, :sampled_points.shape[1]] + cur_pt_idxs[0] = non_empty.repeat(times)[:self.model_cfg.NUM_KEYPOINTS] + + keypoints = sampled_points[0][cur_pt_idxs[0]].unsqueeze(dim=0) + + elif self.model_cfg.SAMPLE_METHOD == 'SPC': + cur_keypoints = self.sectorized_proposal_centric_sampling( + roi_boxes=batch_dict['rois'][bs_idx], points=sampled_points[0] + ) + bs_idxs = cur_keypoints.new_ones(cur_keypoints.shape[0]) * bs_idx + keypoints = torch.cat((bs_idxs[:, None], cur_keypoints), dim=1) + else: + raise NotImplementedError + + keypoints_list.append(keypoints) + + keypoints = torch.cat(keypoints_list, dim=0) # (B, M, 3) or (N1 + N2 + ..., 4) + if len(keypoints.shape) == 3: + batch_idx = torch.arange(batch_size, device=keypoints.device).view(-1, 1).repeat(1, keypoints.shape[1]).view(-1, 1) + keypoints = torch.cat((batch_idx.float(), keypoints.view(-1, 3)), dim=1) + + return keypoints + + @staticmethod + def aggregate_keypoint_features_from_one_source( + batch_size, aggregate_func, xyz, xyz_features, xyz_bs_idxs, new_xyz, new_xyz_batch_cnt, + filter_neighbors_with_roi=False, radius_of_neighbor=None, num_max_points_of_part=200000, rois=None + ): + """ + + Args: + aggregate_func: + xyz: (N, 3) + xyz_features: (N, C) + xyz_bs_idxs: (N) + new_xyz: (M, 3) + new_xyz_batch_cnt: (batch_size), [N1, N2, ...] 
+ + filter_neighbors_with_roi: True/False + radius_of_neighbor: float + num_max_points_of_part: int + rois: (batch_size, num_rois, 7 + C) + Returns: + + """ + xyz_batch_cnt = xyz.new_zeros(batch_size).int() + if filter_neighbors_with_roi: + point_features = torch.cat((xyz, xyz_features), dim=-1) if xyz_features is not None else xyz + point_features_list = [] + for bs_idx in range(batch_size): + bs_mask = (xyz_bs_idxs == bs_idx) + _, valid_mask = sample_points_with_roi( + rois=rois[bs_idx], points=xyz[bs_mask], + sample_radius_with_roi=radius_of_neighbor, num_max_points_of_part=num_max_points_of_part, + ) + point_features_list.append(point_features[bs_mask][valid_mask]) + xyz_batch_cnt[bs_idx] = valid_mask.sum() + + valid_point_features = torch.cat(point_features_list, dim=0) + xyz = valid_point_features[:, 0:3] + xyz_features = valid_point_features[:, 3:] if xyz_features is not None else None + else: + for bs_idx in range(batch_size): + xyz_batch_cnt[bs_idx] = (xyz_bs_idxs == bs_idx).sum() + + pooled_points, pooled_features = aggregate_func( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz, + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=xyz_features.contiguous(), + ) + return pooled_features + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + keypoints: (B, num_keypoints, 3) + multi_scale_3d_features: { + 'x_conv4': ... + } + points: optional (N, 1 + 3 + C) [bs_idx, x, y, z, ...] + spatial_features: optional + spatial_features_stride: optional + + Returns: + point_features: (N, C) + point_coords: (N, 4) + + """ + keypoints = self.get_sampled_points(batch_dict) + + point_features_list = [] + if 'bev' in self.model_cfg.FEATURES_SOURCE: + point_bev_features = self.interpolate_from_bev_features( + keypoints, batch_dict['spatial_features'], batch_dict['batch_size'], + bev_stride=batch_dict['spatial_features_stride'] + ) + point_features_list.append(point_bev_features) + + batch_size = batch_dict['batch_size'] + + new_xyz = keypoints[:, 1:4].contiguous() + new_xyz_batch_cnt = new_xyz.new_zeros(batch_size).int() + for k in range(batch_size): + new_xyz_batch_cnt[k] = (keypoints[:, 0] == k).sum() + + if 'raw_points' in self.model_cfg.FEATURES_SOURCE: + raw_points = batch_dict['points'] + + pooled_features = self.aggregate_keypoint_features_from_one_source( + batch_size=batch_size, aggregate_func=self.SA_rawpoints, + xyz=raw_points[:, 1:4], + xyz_features=raw_points[:, 4:].contiguous() if raw_points.shape[1] > 4 else None, + xyz_bs_idxs=raw_points[:, 0], + new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt, + filter_neighbors_with_roi=self.model_cfg.SA_LAYER['raw_points'].get('FILTER_NEIGHBOR_WITH_ROI', False), + radius_of_neighbor=self.model_cfg.SA_LAYER['raw_points'].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None), + rois=batch_dict.get('rois', None) + ) + point_features_list.append(pooled_features) + + for k, src_name in enumerate(self.SA_layer_names): + cur_coords = batch_dict['multi_scale_3d_features'][src_name].indices + cur_features = batch_dict['multi_scale_3d_features'][src_name].features.contiguous() + + xyz = common_utils.get_voxel_centers( + cur_coords[:, 1:4], downsample_times=self.downsample_times_map[src_name], + voxel_size=self.voxel_size, point_cloud_range=self.point_cloud_range + ) + + pooled_features = self.aggregate_keypoint_features_from_one_source( + batch_size=batch_size, aggregate_func=self.SA_layers[k], + xyz=xyz.contiguous(), xyz_features=cur_features, xyz_bs_idxs=cur_coords[:, 0], + new_xyz=new_xyz, 
new_xyz_batch_cnt=new_xyz_batch_cnt, + filter_neighbors_with_roi=self.model_cfg.SA_LAYER[src_name].get('FILTER_NEIGHBOR_WITH_ROI', False), + radius_of_neighbor=self.model_cfg.SA_LAYER[src_name].get('RADIUS_OF_NEIGHBOR_WITH_ROI', None), + rois=batch_dict.get('rois', None) + ) + + point_features_list.append(pooled_features) + + point_features = torch.cat(point_features_list, dim=-1) + + batch_dict['point_features_before_fusion'] = point_features.view(-1, point_features.shape[-1]) + point_features = self.vsa_point_feature_fusion(point_features.view(-1, point_features.shape[-1])) + + batch_dict['point_features'] = point_features # (BxN, C) + batch_dict['point_coords'] = keypoints # (BxN, 4) + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/pointnet2_backbone.py b/toolbox/openpcdet/pcdet/models/backbones_3d/pointnet2_backbone.py new file mode 100644 index 000000000..fdd0d021f --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/pointnet2_backbone.py @@ -0,0 +1,206 @@ +import torch +import torch.nn as nn + +from ...ops.pointnet2.pointnet2_batch import pointnet2_modules +from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_modules_stack +from ...ops.pointnet2.pointnet2_stack import pointnet2_utils as pointnet2_utils_stack + + +class PointNet2MSG(nn.Module): + def __init__(self, model_cfg, input_channels, **kwargs): + super().__init__() + self.model_cfg = model_cfg + + self.SA_modules = nn.ModuleList() + channel_in = input_channels - 3 + + self.num_points_each_layer = [] + skip_channel_list = [input_channels - 3] + for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()): + mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy() + channel_out = 0 + for idx in range(mlps.__len__()): + mlps[idx] = [channel_in] + mlps[idx] + channel_out += mlps[idx][-1] + + self.SA_modules.append( + pointnet2_modules.PointnetSAModuleMSG( + npoint=self.model_cfg.SA_CONFIG.NPOINTS[k], + radii=self.model_cfg.SA_CONFIG.RADIUS[k], + nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k], + mlps=mlps, + use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True), + ) + ) + skip_channel_list.append(channel_out) + channel_in = channel_out + + self.FP_modules = nn.ModuleList() + + for k in range(self.model_cfg.FP_MLPS.__len__()): + pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out + self.FP_modules.append( + pointnet2_modules.PointnetFPModule( + mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k] + ) + ) + + self.num_point_features = self.model_cfg.FP_MLPS[0][-1] + + def break_up_pc(self, pc): + batch_idx = pc[:, 0] + xyz = pc[:, 1:4].contiguous() + features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None) + return batch_idx, xyz, features + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: int + vfe_features: (num_voxels, C) + points: (num_points, 4 + C), [batch_idx, x, y, z, ...] 
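+            Note:
+                every sample in the batch is expected to contain the same number
+                of points; the forward pass asserts this before reshaping the
+                flattened point list to (B, N, 3)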
+ Returns: + batch_dict: + encoded_spconv_tensor: sparse tensor + point_features: (N, C) + """ + batch_size = batch_dict['batch_size'] + points = batch_dict['points'] + batch_idx, xyz, features = self.break_up_pc(points) + + xyz_batch_cnt = xyz.new_zeros(batch_size).int() + for bs_idx in range(batch_size): + xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum() + + assert xyz_batch_cnt.min() == xyz_batch_cnt.max() + xyz = xyz.view(batch_size, -1, 3) + features = features.view(batch_size, -1, features.shape[-1]).permute(0, 2, 1).contiguous() if features is not None else None + + l_xyz, l_features = [xyz], [features] + for i in range(len(self.SA_modules)): + li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i]) + l_xyz.append(li_xyz) + l_features.append(li_features) + + for i in range(-1, -(len(self.FP_modules) + 1), -1): + l_features[i - 1] = self.FP_modules[i]( + l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i] + ) # (B, C, N) + + point_features = l_features[0].permute(0, 2, 1).contiguous() # (B, N, C) + batch_dict['point_features'] = point_features.view(-1, point_features.shape[-1]) + batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0].view(-1, 3)), dim=1) + return batch_dict + + +class PointNet2Backbone(nn.Module): + """ + DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723 + """ + def __init__(self, model_cfg, input_channels, **kwargs): + assert False, 'DO NOT USE THIS CURRENTLY SINCE IT MAY HAVE POTENTIAL BUGS, 20200723' + super().__init__() + self.model_cfg = model_cfg + + self.SA_modules = nn.ModuleList() + channel_in = input_channels - 3 + + self.num_points_each_layer = [] + skip_channel_list = [input_channels] + for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()): + self.num_points_each_layer.append(self.model_cfg.SA_CONFIG.NPOINTS[k]) + mlps = self.model_cfg.SA_CONFIG.MLPS[k].copy() + channel_out = 0 + for idx in range(mlps.__len__()): + mlps[idx] = [channel_in] + mlps[idx] + channel_out += mlps[idx][-1] + + self.SA_modules.append( + pointnet2_modules_stack.StackSAModuleMSG( + radii=self.model_cfg.SA_CONFIG.RADIUS[k], + nsamples=self.model_cfg.SA_CONFIG.NSAMPLE[k], + mlps=mlps, + use_xyz=self.model_cfg.SA_CONFIG.get('USE_XYZ', True), + ) + ) + skip_channel_list.append(channel_out) + channel_in = channel_out + + self.FP_modules = nn.ModuleList() + + for k in range(self.model_cfg.FP_MLPS.__len__()): + pre_channel = self.model_cfg.FP_MLPS[k + 1][-1] if k + 1 < len(self.model_cfg.FP_MLPS) else channel_out + self.FP_modules.append( + pointnet2_modules_stack.StackPointnetFPModule( + mlp=[pre_channel + skip_channel_list[k]] + self.model_cfg.FP_MLPS[k] + ) + ) + + self.num_point_features = self.model_cfg.FP_MLPS[0][-1] + + def break_up_pc(self, pc): + batch_idx = pc[:, 0] + xyz = pc[:, 1:4].contiguous() + features = (pc[:, 4:].contiguous() if pc.size(-1) > 4 else None) + return batch_idx, xyz, features + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: int + vfe_features: (num_voxels, C) + points: (num_points, 4 + C), [batch_idx, x, y, z, ...] 
+            Returns:
+                batch_dict:
+                    encoded_spconv_tensor: sparse tensor
+                    point_features: (N, C)
+        """
+        batch_size = batch_dict['batch_size']
+        points = batch_dict['points']
+        batch_idx, xyz, features = self.break_up_pc(points)
+
+        xyz_batch_cnt = xyz.new_zeros(batch_size).int()
+        for bs_idx in range(batch_size):
+            xyz_batch_cnt[bs_idx] = (batch_idx == bs_idx).sum()
+
+        l_xyz, l_features, l_batch_cnt = [xyz], [features], [xyz_batch_cnt]
+        for i in range(len(self.SA_modules)):
+            new_xyz_list = []
+            for k in range(batch_size):
+                if len(l_xyz) == 1:
+                    cur_xyz = l_xyz[0][batch_idx == k]
+                else:
+                    last_num_points = self.num_points_each_layer[i - 1]
+                    cur_xyz = l_xyz[-1][k * last_num_points: (k + 1) * last_num_points]
+                cur_pt_idxs = pointnet2_utils_stack.farthest_point_sample(
+                    cur_xyz[None, :, :].contiguous(), self.num_points_each_layer[i]
+                ).long()[0]
+                if cur_xyz.shape[0] < self.num_points_each_layer[i]:
+                    # pad by repeating the leading indices when this sample has
+                    # fewer points than requested (cur_pt_idxs is 1-D here)
+                    empty_num = self.num_points_each_layer[i] - cur_xyz.shape[0]
+                    cur_pt_idxs[-empty_num:] = cur_pt_idxs[:empty_num]
+                new_xyz_list.append(cur_xyz[cur_pt_idxs])
+            new_xyz = torch.cat(new_xyz_list, dim=0)
+
+            new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(self.num_points_each_layer[i])
+            li_xyz, li_features = self.SA_modules[i](
+                xyz=l_xyz[i], features=l_features[i], xyz_batch_cnt=l_batch_cnt[i],
+                new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt
+            )
+
+            l_xyz.append(li_xyz)
+            l_features.append(li_features)
+            l_batch_cnt.append(new_xyz_batch_cnt)
+
+        l_features[0] = points[:, 1:]
+        for i in range(-1, -(len(self.FP_modules) + 1), -1):
+            l_features[i - 1] = self.FP_modules[i](
+                unknown=l_xyz[i - 1], unknown_batch_cnt=l_batch_cnt[i - 1],
+                known=l_xyz[i], known_batch_cnt=l_batch_cnt[i],
+                unknown_feats=l_features[i - 1], known_feats=l_features[i]
+            )
+
+        batch_dict['point_features'] = l_features[0]
+        batch_dict['point_coords'] = torch.cat((batch_idx[:, None].float(), l_xyz[0]), dim=1)
+        return batch_dict
diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone.py b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone.py
new file mode 100644
index 000000000..f0c231a66
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone.py
@@ -0,0 +1,295 @@
+from functools import partial
+
+import torch.nn as nn
+
+from ...utils.spconv_utils import replace_feature, spconv
+
+
+def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0,
+                   conv_type='subm', norm_fn=None):
+
+    if conv_type == 'subm':
+        conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key)
+    elif conv_type == 'spconv':
+        conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding,
+                                   bias=False, indice_key=indice_key)
+    elif conv_type == 'inverseconv':
+        conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False)
+    else:
+        raise NotImplementedError
+
+    m = spconv.SparseSequential(
+        conv,
+        norm_fn(out_channels),
+        nn.ReLU(),
+    )
+
+    return m
+
+
+class SparseBasicBlock(spconv.SparseModule):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, bias=None, norm_fn=None, downsample=None, indice_key=None):
+        super(SparseBasicBlock, self).__init__()
+
+        assert norm_fn is not None
+        if bias is None:
+            bias = norm_fn is not None
+        self.conv1 = spconv.SubMConv3d(
+            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key
+        )
+        self.bn1 = norm_fn(planes)
+        self.relu = nn.ReLU()
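+        # note: no activation directly after conv2/bn2 below; as in the dense
+        # BasicBlock, ReLU is applied only after the residual addition in forward()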
self.conv2 = spconv.SubMConv3d( + planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = replace_feature(out, self.bn1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.bn2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity.features) + out = replace_feature(out, self.relu(out.features)) + + return out + + +class VoxelBackBone8x(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + self.sparse_shape = grid_size[::-1] + [1, 0, 0] + + self.conv_input = spconv.SparseSequential( + spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), + norm_fn(16), + nn.ReLU(), + ) + block = post_act_block + + self.conv1 = spconv.SparseSequential( + block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408, 41] <- [800, 704, 21] + block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704, 21] <- [400, 352, 11] + block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352, 11] <- [200, 176, 5] + block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + ) + + last_pad = 0 + last_pad = self.model_cfg.get('last_pad', last_pad) + self.conv_out = spconv.SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, + bias=False, indice_key='spconv_down2'), + norm_fn(128), + nn.ReLU(), + ) + self.num_point_features = 128 + self.backbone_channels = { + 'x_conv1': 16, + 'x_conv2': 32, + 'x_conv3': 64, + 'x_conv4': 64 + } + + + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: int + vfe_features: (num_voxels, C) + voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx] + Returns: + batch_dict: + encoded_spconv_tensor: sparse tensor + """ + voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=voxel_features, + indices=voxel_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + ) + + x = self.conv_input(input_sp_tensor) + + x_conv1 = self.conv1(x) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(x_conv4) + + batch_dict.update({ + 'encoded_spconv_tensor': out, + 'encoded_spconv_tensor_stride': 8 + }) + 
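+        # expose each stage's sparse tensor so downstream modules (e.g. the
+        # voxel set abstraction layer seen earlier in this patch) can query
+        # multi-scale voxel features together with their strides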
batch_dict.update({ + 'multi_scale_3d_features': { + 'x_conv1': x_conv1, + 'x_conv2': x_conv2, + 'x_conv3': x_conv3, + 'x_conv4': x_conv4, + } + }) + batch_dict.update({ + 'multi_scale_3d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + } + }) + + return batch_dict + + +class VoxelResBackBone8x(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + use_bias = self.model_cfg.get('USE_BIAS', None) + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + self.sparse_shape = grid_size[::-1] + [1, 0, 0] + + self.conv_input = spconv.SparseSequential( + spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), + norm_fn(16), + nn.ReLU(), + ) + block = post_act_block + + self.conv1 = spconv.SparseSequential( + SparseBasicBlock(16, 16, bias=use_bias, norm_fn=norm_fn, indice_key='res1'), + SparseBasicBlock(16, 16, bias=use_bias, norm_fn=norm_fn, indice_key='res1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408, 41] <- [800, 704, 21] + block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + SparseBasicBlock(32, 32, bias=use_bias, norm_fn=norm_fn, indice_key='res2'), + SparseBasicBlock(32, 32, bias=use_bias, norm_fn=norm_fn, indice_key='res2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704, 21] <- [400, 352, 11] + block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + SparseBasicBlock(64, 64, bias=use_bias, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(64, 64, bias=use_bias, norm_fn=norm_fn, indice_key='res3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352, 11] <- [200, 176, 5] + block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), + SparseBasicBlock(128, 128, bias=use_bias, norm_fn=norm_fn, indice_key='res4'), + SparseBasicBlock(128, 128, bias=use_bias, norm_fn=norm_fn, indice_key='res4'), + ) + + last_pad = 0 + last_pad = self.model_cfg.get('last_pad', last_pad) + self.conv_out = spconv.SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + spconv.SparseConv3d(128, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, + bias=False, indice_key='spconv_down2'), + norm_fn(128), + nn.ReLU(), + ) + self.num_point_features = 128 + self.backbone_channels = { + 'x_conv1': 16, + 'x_conv2': 32, + 'x_conv3': 64, + 'x_conv4': 128 + } + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: int + vfe_features: (num_voxels, C) + voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx] + Returns: + batch_dict: + encoded_spconv_tensor: sparse tensor + """ + voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=voxel_features, + indices=voxel_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + ) + x = self.conv_input(input_sp_tensor) + + x_conv1 = self.conv1(x) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(x_conv4) + + batch_dict.update({ + 'encoded_spconv_tensor': out, + 'encoded_spconv_tensor_stride': 8 + }) + batch_dict.update({ + 'multi_scale_3d_features': { + 'x_conv1': x_conv1, + 'x_conv2': x_conv2, + 'x_conv3': x_conv3, + 'x_conv4': x_conv4, + } + }) + + 
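+        # strides below are relative to the input voxel grid (x_conv4 is 8x
+        # downsampled); consumers such as VoxelSetAbstraction recover real-world
+        # voxel centers via common_utils.get_voxel_centers(indices[:, 1:],
+        # downsample_times=8, voxel_size=..., point_cloud_range=...) using the
+        # dataset configuration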
batch_dict.update({ + 'multi_scale_3d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + } + }) + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_2d.py b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_2d.py new file mode 100644 index 000000000..3784ada1f --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_2d.py @@ -0,0 +1,300 @@ +from functools import partial + +import torch.nn as nn + +from ...utils.spconv_utils import replace_feature, spconv + + +def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = spconv.SubMConv2d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = spconv.SparseConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = spconv.SparseInverseConv2d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = spconv.SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(), + ) + + return m + + +def post_act_block_dense(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, norm_fn=None): + m = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, dilation=dilation, bias=False), + norm_fn(out_channels), + nn.ReLU(), + ) + + return m + + +class SparseBasicBlock(spconv.SparseModule): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None): + super(SparseBasicBlock, self).__init__() + + assert norm_fn is not None + bias = norm_fn is not None + self.conv1 = spconv.SubMConv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn1 = norm_fn(planes) + self.relu = nn.ReLU() + self.conv2 = spconv.SubMConv2d( + planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = replace_feature(out, self.bn1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.bn2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity.features) + out = replace_feature(out, self.relu(out.features)) + + return out + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None): + super(BasicBlock, self).__init__() + + assert norm_fn is not None + bias = norm_fn is not None + self.conv1 = nn.Conv2d(inplanes, planes, 3, stride=stride, padding=1, bias=bias) + self.bn1 = norm_fn(planes) + self.relu = nn.ReLU() + self.conv2 = nn.Conv2d(planes, planes, 3, stride=stride, padding=1, bias=bias) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out + identity + out = self.relu(out) + + return out + + +class 
PillarBackBone8x(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + self.sparse_shape = grid_size[[1, 0]] + + block = post_act_block + dense_block = post_act_block_dense + + self.conv1 = spconv.SparseSequential( + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408] <- [800, 704] + block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704] <- [400, 352] + block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + block(128, 128, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + block(128, 128, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352] <- [200, 176] + block(128, 256, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv4', conv_type='spconv'), + block(256, 256, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + block(256, 256, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + ) + + norm_fn = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.01) + self.conv5 = nn.Sequential( + # [200, 176] <- [100, 88] + dense_block(256, 256, 3, norm_fn=norm_fn, stride=2, padding=1), + dense_block(256, 256, 3, norm_fn=norm_fn, padding=1), + dense_block(256, 256, 3, norm_fn=norm_fn, padding=1), + ) + + self.num_point_features = 256 + self.backbone_channels = { + 'x_conv1': 32, + 'x_conv2': 64, + 'x_conv3': 128, + 'x_conv4': 256, + 'x_conv5': 256 + } + + + def forward(self, batch_dict): + pillar_features, pillar_coords = batch_dict['pillar_features'], batch_dict['pillar_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=pillar_features, + indices=pillar_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + ) + + x_conv1 = self.conv1(input_sp_tensor) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + x_conv4 = x_conv4.dense() + x_conv5 = self.conv5(x_conv4) + + batch_dict.update({ + 'multi_scale_2d_features': { + 'x_conv1': x_conv1, + 'x_conv2': x_conv2, + 'x_conv3': x_conv3, + 'x_conv4': x_conv4, + 'x_conv5': x_conv5, + } + }) + batch_dict.update({ + 'multi_scale_2d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + 'x_conv5': 16, + } + }) + + return batch_dict + + +class PillarRes18BackBone8x(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + self.sparse_shape = grid_size[[1, 0]] + + block = post_act_block + dense_block = post_act_block_dense + + self.conv1 = spconv.SparseSequential( + SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'), + SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408] <- [800, 704] + block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'), + SparseBasicBlock(64, 64, 
norm_fn=norm_fn, indice_key='res2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704] <- [400, 352] + block(64, 128, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352] <- [200, 176] + block(128, 256, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv4', conv_type='spconv'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'), + ) + + norm_fn = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.01) + self.conv5 = nn.Sequential( + # [200, 176] <- [100, 88] + dense_block(256, 256, 3, norm_fn=norm_fn, stride=2, padding=1), + BasicBlock(256, 256, norm_fn=norm_fn), + BasicBlock(256, 256, norm_fn=norm_fn), + ) + + self.num_point_features = 256 + self.backbone_channels = { + 'x_conv1': 32, + 'x_conv2': 64, + 'x_conv3': 128, + 'x_conv4': 256, + 'x_conv5': 256 + } + + def forward(self, batch_dict): + pillar_features, pillar_coords = batch_dict['pillar_features'], batch_dict['pillar_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=pillar_features, + indices=pillar_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + ) + + x_conv1 = self.conv1(input_sp_tensor) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + x_conv4 = x_conv4.dense() + x_conv5 = self.conv5(x_conv4) + + # batch_dict.update({ + # 'encoded_spconv_tensor': out, + # 'encoded_spconv_tensor_stride': 8 + # }) + batch_dict.update({ + 'multi_scale_2d_features': { + 'x_conv1': x_conv1, + 'x_conv2': x_conv2, + 'x_conv3': x_conv3, + 'x_conv4': x_conv4, + 'x_conv5': x_conv5, + } + }) + batch_dict.update({ + 'multi_scale_2d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + 'x_conv5': 16, + } + }) + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_focal.py b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_focal.py new file mode 100755 index 000000000..5869b8ba5 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_focal.py @@ -0,0 +1,269 @@ +from functools import partial + +import torch +from pcdet.utils.spconv_utils import spconv +import torch.nn as nn + +from .focal_sparse_conv.focal_sparse_conv import FocalSparseConv +from .focal_sparse_conv.SemanticSeg.pyramid_ffn import PyramidFeat2D + + +class objDict: + @staticmethod + def to_object(obj: object, **data): + obj.__dict__.update(data) + +class ConfigDict: + def __init__(self, name): + self.name = name + def __getitem__(self, item): + return getattr(self, item) + + +class SparseSequentialBatchdict(spconv.SparseSequential): + def __init__(self, *args, **kwargs): + super(SparseSequentialBatchdict, self).__init__(*args, **kwargs) + + def forward(self, input, batch_dict=None): + loss = 0 + for k, module in self._modules.items(): + if module is None: + continue + if isinstance(module, (FocalSparseConv,)): + input, batch_dict, _loss = module(input, batch_dict) + loss += _loss + else: + input = module(input) + return input, batch_dict, loss + + +def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = spconv.SubMConv3d(in_channels, out_channels, 
kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = spconv.SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(True), + ) + + return m + + +class SparseBasicBlock(spconv.SparseModule): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None): + super(SparseBasicBlock, self).__init__() + + assert norm_fn is not None + bias = norm_fn is not None + self.conv1 = spconv.SubMConv3d( + inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn1 = norm_fn(planes) + self.relu = nn.ReLU(True) + self.conv2 = spconv.SubMConv3d( + planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = out.replace_feature(self.bn1(out.features)) + out = out.replace_feature(self.relu(out.features)) + + out = self.conv2(out) + out = out.replace_feature(self.bn2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = out.replace_feature(out.features + identity.features) + out = out.replace_feature(self.relu(out.features)) + + return out + + +class VoxelBackBone8xFocal(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + self.sparse_shape = grid_size[::-1] + [1, 0, 0] + + self.conv_input = spconv.SparseSequential( + spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), + norm_fn(16), + nn.ReLU(True), + ) + + block = post_act_block + + use_img = model_cfg.get('USE_IMG', False) + topk = model_cfg.get('TOPK', True) + threshold = model_cfg.get('THRESHOLD', 0.5) + kernel_size = model_cfg.get('KERNEL_SIZE', 3) + mask_multi = model_cfg.get('MASK_MULTI', False) + skip_mask_kernel = model_cfg.get('SKIP_MASK_KERNEL', False) + skip_mask_kernel_image = model_cfg.get('SKIP_MASK_KERNEL_IMG', False) + enlarge_voxel_channels = model_cfg.get('ENLARGE_VOXEL_CHANNELS', -1) + img_pretrain = model_cfg.get('IMG_PRETRAIN', "../checkpoints/deeplabv3_resnet50_coco-cd0a2569.pth") + + if use_img: + model_cfg_seg=dict( + name='SemDeepLabV3', + backbone='ResNet50', + num_class=21, # pretrained on COCO + args={"feat_extract_layer": ["layer1"], + "pretrained_path": img_pretrain}, + channel_reduce={ + "in_channels": [256], + "out_channels": [16], + "kernel_size": [1], + "stride": [1], + "bias": [False] + } + ) + cfg_dict = ConfigDict('SemDeepLabV3') + objDict.to_object(cfg_dict, **model_cfg_seg) + self.semseg = PyramidFeat2D(optimize=True, model_cfg=cfg_dict) + + self.conv_focal_multimodal = FocalSparseConv(16, 16, image_channel=model_cfg_seg['channel_reduce']['out_channels'][0], + topk=topk, threshold=threshold, use_img=True, skip_mask_kernel=skip_mask_kernel_image, + voxel_stride=1, norm_fn=norm_fn, indice_key='spconv_focal_multimodal') + + special_spconv_fn = partial(FocalSparseConv, mask_multi=mask_multi, enlarge_voxel_channels=enlarge_voxel_channels, + topk=topk, threshold=threshold, 
kernel_size=kernel_size, padding=kernel_size//2,
+                                    skip_mask_kernel=skip_mask_kernel)
+        self.use_img = use_img
+
+        self.conv1 = SparseSequentialBatchdict(
+            block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
+            special_spconv_fn(16, 16, voxel_stride=1, norm_fn=norm_fn, indice_key='focal1'),
+        )
+
+        self.conv2 = SparseSequentialBatchdict(
+            # [1600, 1408, 41] <- [800, 704, 21]
+            block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
+            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
+            block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
+            special_spconv_fn(32, 32, voxel_stride=2, norm_fn=norm_fn, indice_key='focal2'),
+        )
+
+        self.conv3 = SparseSequentialBatchdict(
+            # [800, 704, 21] <- [400, 352, 11]
+            block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
+            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
+            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
+            special_spconv_fn(64, 64, voxel_stride=4, norm_fn=norm_fn, indice_key='focal3'),
+        )
+
+        self.conv4 = SparseSequentialBatchdict(
+            # [400, 352, 11] <- [200, 176, 5]
+            block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
+            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
+            block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
+        )
+
+        last_pad = 0
+        last_pad = self.model_cfg.get('last_pad', last_pad)
+        self.conv_out = spconv.SparseSequential(
+            # [200, 150, 5] -> [200, 150, 2]
+            spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
+                                bias=False, indice_key='spconv_down2'),
+            norm_fn(128),
+            nn.ReLU(True),
+        )
+        self.num_point_features = 128
+        self.backbone_channels = {
+            'x_conv1': 16,
+            'x_conv2': 32,
+            'x_conv3': 64,
+            'x_conv4': 64
+        }
+
+        self.forward_ret_dict = {}
+
+    def get_loss(self, tb_dict=None):
+        loss = self.forward_ret_dict['loss_box_of_pts']
+        if tb_dict is None:
+            tb_dict = {}
+        tb_dict['loss_box_of_pts'] = loss.item()
+        return loss, tb_dict
+
+    def forward(self, batch_dict):
+        """
+        Args:
+            batch_dict:
+                batch_size: int
+                vfe_features: (num_voxels, C)
+                voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
+        Returns:
+            batch_dict:
+                encoded_spconv_tensor: sparse tensor
+        """
+        voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
+        batch_size = batch_dict['batch_size']
+        input_sp_tensor = spconv.SparseConvTensor(
+            features=voxel_features,
+            indices=voxel_coords.int(),
+            spatial_shape=self.sparse_shape,
+            batch_size=batch_size
+        )
+
+        loss_img = 0
+
+        x = self.conv_input(input_sp_tensor)
+        x_conv1, batch_dict, loss1 = self.conv1(x, batch_dict)
+
+        if self.use_img:
+            x_image = self.semseg(batch_dict['images'])['layer1_feat2d']
+            x_conv1, batch_dict, loss_img = self.conv_focal_multimodal(x_conv1, batch_dict, x_image)
+
+        x_conv2, batch_dict, loss2 = self.conv2(x_conv1, batch_dict)
+        x_conv3, batch_dict, loss3 = self.conv3(x_conv2, batch_dict)
+        x_conv4, batch_dict, loss4 = self.conv4(x_conv3, batch_dict)
+
+        # each SparseSequentialBatchdict stage returns the auxiliary loss of its
+        # FocalSparseConv members; accumulate them for get_loss()
+        self.forward_ret_dict['loss_box_of_pts'] = loss1 + loss2 + loss3 + loss4 + loss_img
+        # for detection head
+        # [200, 176, 5] -> [200, 176, 2]
+        out = self.conv_out(x_conv4)
+
+        batch_dict.update({
+            'encoded_spconv_tensor': out,
+            'encoded_spconv_tensor_stride': 8
+        })
+        batch_dict.update({
+            'multi_scale_3d_features': {
+                'x_conv1': x_conv1,
+                'x_conv2': x_conv2,
+                'x_conv3': x_conv3,
+                'x_conv4': x_conv4,
+            }
}) + batch_dict.update({ + 'multi_scale_3d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + } + }) + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext.py b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext.py new file mode 100644 index 000000000..94ee73497 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext.py @@ -0,0 +1,225 @@ +from functools import partial +import torch +import torch.nn as nn + +from ...utils.spconv_utils import replace_feature, spconv + + +def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = spconv.SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(), + ) + + return m + + +class SparseBasicBlock(spconv.SparseModule): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None): + super(SparseBasicBlock, self).__init__() + + assert norm_fn is not None + bias = norm_fn is not None + self.conv1 = spconv.SubMConv3d( + inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn1 = norm_fn(planes) + self.relu = nn.ReLU() + self.conv2 = spconv.SubMConv3d( + planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = replace_feature(out, self.bn1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.bn2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity.features) + out = replace_feature(out, self.relu(out.features)) + + return out + + +class VoxelResBackBone8xVoxelNeXt(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + spconv_kernel_sizes = model_cfg.get('SPCONV_KERNEL_SIZES', [3, 3, 3, 3]) + channels = model_cfg.get('CHANNELS', [16, 32, 64, 128, 128]) + out_channel = model_cfg.get('OUT_CHANNEL', 128) + + self.sparse_shape = grid_size[::-1] + [1, 0, 0] + + self.conv_input = spconv.SparseSequential( + spconv.SubMConv3d(input_channels, channels[0], 3, padding=1, bias=False, indice_key='subm1'), + norm_fn(channels[0]), + nn.ReLU(), + ) + block = post_act_block + + self.conv1 = spconv.SparseSequential( + SparseBasicBlock(channels[0], channels[0], norm_fn=norm_fn, indice_key='res1'), + SparseBasicBlock(channels[0], channels[0], norm_fn=norm_fn, indice_key='res1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408, 41] <- [800, 704, 21] + block(channels[0], channels[1], spconv_kernel_sizes[0], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[0]//2), indice_key='spconv2', 
conv_type='spconv'), + SparseBasicBlock(channels[1], channels[1], norm_fn=norm_fn, indice_key='res2'), + SparseBasicBlock(channels[1], channels[1], norm_fn=norm_fn, indice_key='res2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704, 21] <- [400, 352, 11] + block(channels[1], channels[2], spconv_kernel_sizes[1], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[1]//2), indice_key='spconv3', conv_type='spconv'), + SparseBasicBlock(channels[2], channels[2], norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(channels[2], channels[2], norm_fn=norm_fn, indice_key='res3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352, 11] <- [200, 176, 6] + block(channels[2], channels[3], spconv_kernel_sizes[2], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[2]//2), indice_key='spconv4', conv_type='spconv'), + SparseBasicBlock(channels[3], channels[3], norm_fn=norm_fn, indice_key='res4'), + SparseBasicBlock(channels[3], channels[3], norm_fn=norm_fn, indice_key='res4'), + ) + + self.conv5 = spconv.SparseSequential( + # [200, 176, 6] <- [100, 88, 3] + block(channels[3], channels[4], spconv_kernel_sizes[3], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[3]//2), indice_key='spconv5', conv_type='spconv'), + SparseBasicBlock(channels[4], channels[4], norm_fn=norm_fn, indice_key='res5'), + SparseBasicBlock(channels[4], channels[4], norm_fn=norm_fn, indice_key='res5'), + ) + + self.conv6 = spconv.SparseSequential( + # [200, 176, 6] <- [100, 88, 3] + block(channels[4], channels[4], spconv_kernel_sizes[3], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[3]//2), indice_key='spconv6', conv_type='spconv'), + SparseBasicBlock(channels[4], channels[4], norm_fn=norm_fn, indice_key='res6'), + SparseBasicBlock(channels[4], channels[4], norm_fn=norm_fn, indice_key='res6'), + ) + self.conv_out = spconv.SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + spconv.SparseConv2d(channels[3], out_channel, 3, stride=1, padding=1, bias=False, indice_key='spconv_down2'), + norm_fn(out_channel), + nn.ReLU(), + ) + + self.shared_conv = spconv.SparseSequential( + spconv.SubMConv2d(out_channel, out_channel, 3, stride=1, padding=1, bias=True), + nn.BatchNorm1d(out_channel), + nn.ReLU(True), + ) + + self.forward_ret_dict = {} + self.num_point_features = out_channel + self.backbone_channels = { + 'x_conv1': channels[0], + 'x_conv2': channels[1], + 'x_conv3': channels[2], + 'x_conv4': channels[3] + } + + def bev_out(self, x_conv): + features_cat = x_conv.features + indices_cat = x_conv.indices[:, [0, 2, 3]] + spatial_shape = x_conv.spatial_shape[1:] + + indices_unique, _inv = torch.unique(indices_cat, dim=0, return_inverse=True) + features_unique = features_cat.new_zeros((indices_unique.shape[0], features_cat.shape[1])) + features_unique.index_add_(0, _inv, features_cat) + + x_out = spconv.SparseConvTensor( + features=features_unique, + indices=indices_unique, + spatial_shape=spatial_shape, + batch_size=x_conv.batch_size + ) + return x_out + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: int + vfe_features: (num_voxels, C) + voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx] + Returns: + batch_dict: + encoded_spconv_tensor: sparse tensor + """ + voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=voxel_features, + indices=voxel_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + 
) + x = self.conv_input(input_sp_tensor) + + x_conv1 = self.conv1(x) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + x_conv5 = self.conv5(x_conv4) + x_conv6 = self.conv6(x_conv5) + + x_conv5.indices[:, 1:] *= 2 + x_conv6.indices[:, 1:] *= 4 + x_conv4 = x_conv4.replace_feature(torch.cat([x_conv4.features, x_conv5.features, x_conv6.features])) + x_conv4.indices = torch.cat([x_conv4.indices, x_conv5.indices, x_conv6.indices]) + + out = self.bev_out(x_conv4) + + out = self.conv_out(out) + out = self.shared_conv(out) + + batch_dict.update({ + 'encoded_spconv_tensor': out, + 'encoded_spconv_tensor_stride': 8 + }) + batch_dict.update({ + 'multi_scale_3d_features': { + 'x_conv1': x_conv1, + 'x_conv2': x_conv2, + 'x_conv3': x_conv3, + 'x_conv4': x_conv4, + } + }) + batch_dict.update({ + 'multi_scale_3d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + } + }) + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext2d.py b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext2d.py new file mode 100644 index 000000000..bdfdf9984 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_backbone_voxelnext2d.py @@ -0,0 +1,219 @@ +from functools import partial +import torch +import torch.nn as nn + +from ...utils.spconv_utils import replace_feature, spconv + + +def post_act_block(in_channels, out_channels, kernel_size, indice_key=None, stride=1, padding=0, + conv_type='subm', norm_fn=None): + + if conv_type == 'subm': + conv = spconv.SubMConv2d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key) + elif conv_type == 'spconv': + conv = spconv.SparseConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key) + elif conv_type == 'inverseconv': + conv = spconv.SparseInverseConv2d(in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False) + else: + raise NotImplementedError + + m = spconv.SparseSequential( + conv, + norm_fn(out_channels), + nn.ReLU(), + ) + + return m + + +class SparseBasicBlock(spconv.SparseModule): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None): + super(SparseBasicBlock, self).__init__() + + assert norm_fn is not None + bias = norm_fn is not None + self.conv1 = spconv.SubMConv2d( + inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn1 = norm_fn(planes) + self.relu = nn.ReLU() + self.conv2 = spconv.SubMConv2d( + planes, planes, kernel_size=3, stride=stride, padding=1, bias=bias, indice_key=indice_key + ) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = replace_feature(out, self.bn1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.bn2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity.features) + out = replace_feature(out, self.relu(out.features)) + + return out + + +class VoxelResBackBone8xVoxelNeXt2D(nn.Module): + def __init__(self, model_cfg, input_channels, grid_size, **kwargs): + super().__init__() + self.model_cfg = model_cfg + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + self.sparse_shape = grid_size[[1, 0]] + + block = post_act_block + + 
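+        # kernel sizes of the strided 'spconv' blocks are configurable; each
+        # downsampling block below uses padding = kernel_size // 2 so the
+        # downsampled sparse indice grid stays aligned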
spconv_kernel_sizes = model_cfg.get('SPCONV_KERNEL_SIZES', [3, 3, 3, 3]) + + self.conv1 = spconv.SparseSequential( + SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'), + SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'), + SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key='res1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408] <- [800, 704] + block(32, 64, spconv_kernel_sizes[0], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[0]//2), indice_key='spconv2', conv_type='spconv'), + SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'), + SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'), + SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'), + SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key='res2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704] <- [400, 352] + block(64, 128, spconv_kernel_sizes[1], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[1]//2), indice_key='spconv3', conv_type='spconv'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key='res3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352] <- [200, 176] + block(128, 256, spconv_kernel_sizes[2], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[2]//2), indice_key='spconv4', conv_type='spconv'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res4'), + ) + + self.conv5 = spconv.SparseSequential( + # [400, 352] <- [200, 176] + block(256, 256, spconv_kernel_sizes[3], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[3]//2), indice_key='spconv5', conv_type='spconv'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res5'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res5'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res5'), + ) + + self.conv6 = spconv.SparseSequential( + # [400, 352] <- [200, 176] + block(256, 256, spconv_kernel_sizes[3], norm_fn=norm_fn, stride=2, padding=int(spconv_kernel_sizes[3]//2), indice_key='spconv6', conv_type='spconv'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res6'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res6'), + SparseBasicBlock(256, 256, norm_fn=norm_fn, indice_key='res6'), + ) + + self.conv_out = spconv.SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + spconv.SparseConv2d(256, 256, 3, stride=1, padding=1, bias=False, indice_key='spconv_down2'), + norm_fn(256), + nn.ReLU(), + ) + + self.shared_conv = spconv.SparseSequential( + spconv.SubMConv2d(256, 256, 3, stride=1, padding=1, bias=True), + nn.BatchNorm1d(256), + nn.ReLU(True), + ) + + self.num_point_features = 256 + self.backbone_channels = { + 'x_conv1': 32, + 'x_conv2': 64, + 'x_conv3': 128, + 'x_conv4': 256, + 'x_conv5': 256 + } + self.forward_ret_dict = {} + + def bev_out(self, x_conv): + features_cat = x_conv.features + indices_cat = x_conv.indices + + indices_unique, _inv = torch.unique(indices_cat, dim=0, return_inverse=True) + features_unique = features_cat.new_zeros((indices_unique.shape[0], features_cat.shape[1])) + features_unique.index_add_(0, _inv, 
features_cat) + + x_out = spconv.SparseConvTensor( + features=features_unique, + indices=indices_unique, + spatial_shape=x_conv.spatial_shape, + batch_size=x_conv.batch_size + ) + return x_out + + def forward(self, batch_dict): + pillar_features, pillar_coords = batch_dict['pillar_features'], batch_dict['pillar_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=pillar_features, + indices=pillar_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + ) + + x_conv1 = self.conv1(input_sp_tensor) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + x_conv5 = self.conv5(x_conv4) + x_conv6 = self.conv6(x_conv5) + + x_conv5.indices[:, 1:] *= 2 + x_conv6.indices[:, 1:] *= 4 + x_conv4 = x_conv4.replace_feature(torch.cat([x_conv4.features, x_conv5.features, x_conv6.features])) + x_conv4.indices = torch.cat([x_conv4.indices, x_conv5.indices, x_conv6.indices]) + + out = self.bev_out(x_conv4) + + out = self.conv_out(out) + out = self.shared_conv(out) + + batch_dict.update({ + 'encoded_spconv_tensor': out, + 'encoded_spconv_tensor_stride': 8 + }) + batch_dict.update({ + 'multi_scale_2d_features': { + 'x_conv1': x_conv1, + 'x_conv2': x_conv2, + 'x_conv3': x_conv3, + 'x_conv4': x_conv4, + 'x_conv5': x_conv5, + } + }) + batch_dict.update({ + 'multi_scale_2d_strides': { + 'x_conv1': 1, + 'x_conv2': 2, + 'x_conv3': 4, + 'x_conv4': 8, + 'x_conv5': 16, + } + }) + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_unet.py b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_unet.py new file mode 100644 index 000000000..a5e7c4b36 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/spconv_unet.py @@ -0,0 +1,212 @@ +from functools import partial + +import torch +import torch.nn as nn + +from ...utils.spconv_utils import replace_feature, spconv +from ...utils import common_utils +from .spconv_backbone import post_act_block + + +class SparseBasicBlock(spconv.SparseModule): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None): + super(SparseBasicBlock, self).__init__() + self.conv1 = spconv.SubMConv3d( + inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key + ) + self.bn1 = norm_fn(planes) + self.relu = nn.ReLU() + self.conv2 = spconv.SubMConv3d( + planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key + ) + self.bn2 = norm_fn(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x.features + + assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim() + + out = self.conv1(x) + out = replace_feature(out, self.bn1(out.features)) + out = replace_feature(out, self.relu(out.features)) + + out = self.conv2(out) + out = replace_feature(out, self.bn2(out.features)) + + if self.downsample is not None: + identity = self.downsample(x) + + out = replace_feature(out, out.features + identity) + out = replace_feature(out, self.relu(out.features)) + + return out + + +class UNetV2(nn.Module): + """ + Sparse Convolution based UNet for point-wise feature learning. + Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi, et. 
al) + From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network + """ + + def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs): + super().__init__() + self.model_cfg = model_cfg + self.sparse_shape = grid_size[::-1] + [1, 0, 0] + self.voxel_size = voxel_size + self.point_cloud_range = point_cloud_range + + norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01) + + self.conv_input = spconv.SparseSequential( + spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'), + norm_fn(16), + nn.ReLU(), + ) + block = post_act_block + + self.conv1 = spconv.SparseSequential( + block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'), + ) + + self.conv2 = spconv.SparseSequential( + # [1600, 1408, 41] <- [800, 704, 21] + block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'), + ) + + self.conv3 = spconv.SparseSequential( + # [800, 704, 21] <- [400, 352, 11] + block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'), + ) + + self.conv4 = spconv.SparseSequential( + # [400, 352, 11] <- [200, 176, 5] + block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'), + ) + + if self.model_cfg.get('RETURN_ENCODED_TENSOR', True): + last_pad = self.model_cfg.get('last_pad', 0) + + self.conv_out = spconv.SparseSequential( + # [200, 150, 5] -> [200, 150, 2] + spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad, + bias=False, indice_key='spconv_down2'), + norm_fn(128), + nn.ReLU(), + ) + else: + self.conv_out = None + + # decoder + # [400, 352, 11] <- [200, 176, 5] + self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn) + self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4') + self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv') + + # [800, 704, 21] <- [400, 352, 11] + self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn) + self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3') + self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv') + + # [1600, 1408, 41] <- [800, 704, 21] + self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn) + self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2') + self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv') + + # [1600, 1408, 41] <- [1600, 1408, 41] + self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn) + self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1') + + self.conv5 = spconv.SparseSequential( + block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1') + ) + self.num_point_features = 16 + + def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv): + x_trans = conv_t(x_lateral) + x = x_trans + x = replace_feature(x, torch.cat((x_bottom.features, 
x_trans.features), dim=1)) + x_m = conv_m(x) + x = self.channel_reduction(x, x_m.features.shape[1]) + x = replace_feature(x, x_m.features + x.features) + x = conv_inv(x) + return x + + @staticmethod + def channel_reduction(x, out_channels): + """ + Args: + x: x.features (N, C1) + out_channels: C2 + + Returns: + + """ + features = x.features + n, in_channels = features.shape + assert (in_channels % out_channels == 0) and (in_channels >= out_channels) + + x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2)) + return x + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: int + vfe_features: (num_voxels, C) + voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx] + Returns: + batch_dict: + encoded_spconv_tensor: sparse tensor + point_features: (N, C) + """ + voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords'] + batch_size = batch_dict['batch_size'] + input_sp_tensor = spconv.SparseConvTensor( + features=voxel_features, + indices=voxel_coords.int(), + spatial_shape=self.sparse_shape, + batch_size=batch_size + ) + x = self.conv_input(input_sp_tensor) + + x_conv1 = self.conv1(x) + x_conv2 = self.conv2(x_conv1) + x_conv3 = self.conv3(x_conv2) + x_conv4 = self.conv4(x_conv3) + + if self.conv_out is not None: + # for detection head + # [200, 176, 5] -> [200, 176, 2] + out = self.conv_out(x_conv4) + batch_dict['encoded_spconv_tensor'] = out + batch_dict['encoded_spconv_tensor_stride'] = 8 + + # for segmentation head + # [400, 352, 11] <- [200, 176, 5] + x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4) + # [800, 704, 21] <- [400, 352, 11] + x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3) + # [1600, 1408, 41] <- [800, 704, 21] + x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2) + # [1600, 1408, 41] <- [1600, 1408, 41] + x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5) + + batch_dict['point_features'] = x_up1.features + point_coords = common_utils.get_voxel_centers( + x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size, + point_cloud_range=self.point_cloud_range + ) + batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1) + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/__init__.py new file mode 100644 index 000000000..e6660168c --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/__init__.py @@ -0,0 +1,18 @@ +from .mean_vfe import MeanVFE +from .pillar_vfe import PillarVFE +from .dynamic_mean_vfe import DynamicMeanVFE +from .dynamic_pillar_vfe import DynamicPillarVFE, DynamicPillarVFESimple2D +from .dynamic_voxel_vfe import DynamicVoxelVFE +from .image_vfe import ImageVFE +from .vfe_template import VFETemplate + +__all__ = { + 'VFETemplate': VFETemplate, + 'MeanVFE': MeanVFE, + 'PillarVFE': PillarVFE, + 'ImageVFE': ImageVFE, + 'DynMeanVFE': DynamicMeanVFE, + 'DynPillarVFE': DynamicPillarVFE, + 'DynamicPillarVFESimple2D': DynamicPillarVFESimple2D, + 'DynamicVoxelVFE': DynamicVoxelVFE, +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py new file mode 100644 index 000000000..b4c5b0667 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_mean_vfe.py @@ 
-0,0 +1,75 @@ +import torch + +from .vfe_template import VFETemplate + +try: + import torch_scatter +except Exception as e: + # In case someone doesn't want to use the dynamic VFEs and hasn't installed torch_scatter + pass + + +class DynamicMeanVFE(VFETemplate): + def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs): + super().__init__(model_cfg=model_cfg) + self.num_point_features = num_point_features + + self.grid_size = torch.tensor(grid_size).cuda() + self.voxel_size = torch.tensor(voxel_size).cuda() + self.point_cloud_range = torch.tensor(point_cloud_range).cuda() + + self.voxel_x = voxel_size[0] + self.voxel_y = voxel_size[1] + self.voxel_z = voxel_size[2] + self.x_offset = self.voxel_x / 2 + point_cloud_range[0] + self.y_offset = self.voxel_y / 2 + point_cloud_range[1] + self.z_offset = self.voxel_z / 2 + point_cloud_range[2] + + self.scale_xyz = grid_size[0] * grid_size[1] * grid_size[2] + self.scale_yz = grid_size[1] * grid_size[2] + self.scale_z = grid_size[2] + + def get_output_feature_dim(self): + return self.num_point_features + + @torch.no_grad() + def forward(self, batch_dict, **kwargs): + """ + Args: + batch_dict: + points: (num_points, 1 + C), [batch_idx, x, y, z, ...] + **kwargs: + + Returns: + batch_dict: + voxel_features: (num_voxels, C) + voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx] + """ + batch_size = batch_dict['batch_size'] + points = batch_dict['points'] # (batch_idx, x, y, z, i, e) + + # quantize points into voxel indices and drop points outside the valid range + point_coords = torch.floor((points[:, 1:4] - self.point_cloud_range[0:3]) / self.voxel_size).int() + mask = ((point_coords >= 0) & (point_coords < self.grid_size)).all(dim=1) + points = points[mask] + point_coords = point_coords[mask] + merge_coords = points[:, 0].int() * self.scale_xyz + \ + point_coords[:, 0] * self.scale_yz + \ + point_coords[:, 1] * self.scale_z + \ + point_coords[:, 2] + points_data = points[:, 1:].contiguous() + + unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True) + + points_mean = torch_scatter.scatter_mean(points_data, unq_inv, dim=0) + + unq_coords = unq_coords.int() + voxel_coords = torch.stack((unq_coords // self.scale_xyz, + (unq_coords % self.scale_xyz) // self.scale_yz, + (unq_coords % self.scale_yz) // self.scale_z, + unq_coords % self.scale_z), dim=1) + voxel_coords = voxel_coords[:, [0, 3, 2, 1]] + + batch_dict['voxel_features'] = points_mean.contiguous() + batch_dict['voxel_coords'] = voxel_coords.contiguous() + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py new file mode 100644 index 000000000..f5fb6b1ac --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_pillar_vfe.py @@ -0,0 +1,245 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + import torch_scatter +except Exception as e: + # In case someone doesn't want to use the dynamic pillar VFE and hasn't installed torch_scatter + pass + +from .vfe_template import VFETemplate + + +class PFNLayerV2(nn.Module): + def __init__(self, + in_channels, + out_channels, + use_norm=True, + last_layer=False): + super().__init__() + + self.last_vfe = last_layer + self.use_norm = use_norm + if not self.last_vfe: + out_channels = out_channels // 2 + + if self.use_norm: + self.linear = nn.Linear(in_channels, out_channels, bias=False) + self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01) + else: + self.linear = 
nn.Linear(in_channels, out_channels, bias=True) + + self.relu = nn.ReLU() + + def forward(self, inputs, unq_inv): + + x = self.linear(inputs) + x = self.norm(x) if self.use_norm else x + x = self.relu(x) + x_max = torch_scatter.scatter_max(x, unq_inv, dim=0)[0] + + if self.last_vfe: + return x_max + else: + x_concatenated = torch.cat([x, x_max[unq_inv, :]], dim=1) + return x_concatenated + + +class DynamicPillarVFE(VFETemplate): + def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs): + super().__init__(model_cfg=model_cfg) + + self.use_norm = self.model_cfg.USE_NORM + self.with_distance = self.model_cfg.WITH_DISTANCE + self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ + num_point_features += 6 if self.use_absolute_xyz else 3 + if self.with_distance: + num_point_features += 1 + + self.num_filters = self.model_cfg.NUM_FILTERS + assert len(self.num_filters) > 0 + num_filters = [num_point_features] + list(self.num_filters) + + pfn_layers = [] + for i in range(len(num_filters) - 1): + in_filters = num_filters[i] + out_filters = num_filters[i + 1] + pfn_layers.append( + PFNLayerV2(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2)) + ) + self.pfn_layers = nn.ModuleList(pfn_layers) + + self.voxel_x = voxel_size[0] + self.voxel_y = voxel_size[1] + self.voxel_z = voxel_size[2] + self.x_offset = self.voxel_x / 2 + point_cloud_range[0] + self.y_offset = self.voxel_y / 2 + point_cloud_range[1] + self.z_offset = self.voxel_z / 2 + point_cloud_range[2] + + self.scale_xy = grid_size[0] * grid_size[1] + self.scale_y = grid_size[1] + + self.grid_size = torch.tensor(grid_size).cuda() + self.voxel_size = torch.tensor(voxel_size).cuda() + self.point_cloud_range = torch.tensor(point_cloud_range).cuda() + + def get_output_feature_dim(self): + return self.num_filters[-1] + + def forward(self, batch_dict, **kwargs): + points = batch_dict['points'] # (batch_idx, x, y, z, i, e) + + points_coords = torch.floor((points[:, [1,2]] - self.point_cloud_range[[0,1]]) / self.voxel_size[[0,1]]).int() + mask = ((points_coords >= 0) & (points_coords < self.grid_size[[0,1]])).all(dim=1) + points = points[mask] + points_coords = points_coords[mask] + points_xyz = points[:, [1, 2, 3]].contiguous() + + merge_coords = points[:, 0].int() * self.scale_xy + \ + points_coords[:, 0] * self.scale_y + \ + points_coords[:, 1] + + unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True, dim=0) + + points_mean = torch_scatter.scatter_mean(points_xyz, unq_inv, dim=0) + f_cluster = points_xyz - points_mean[unq_inv, :] + + f_center = torch.zeros_like(points_xyz) + f_center[:, 0] = points_xyz[:, 0] - (points_coords[:, 0].to(points_xyz.dtype) * self.voxel_x + self.x_offset) + f_center[:, 1] = points_xyz[:, 1] - (points_coords[:, 1].to(points_xyz.dtype) * self.voxel_y + self.y_offset) + f_center[:, 2] = points_xyz[:, 2] - self.z_offset + + if self.use_absolute_xyz: + features = [points[:, 1:], f_cluster, f_center] + else: + features = [points[:, 4:], f_cluster, f_center] + + if self.with_distance: + points_dist = torch.norm(points[:, 1:4], 2, dim=1, keepdim=True) + features.append(points_dist) + features = torch.cat(features, dim=-1) + + for pfn in self.pfn_layers: + features = pfn(features, unq_inv) + # features = self.linear1(features) + # features_max = torch_scatter.scatter_max(features, unq_inv, dim=0)[0] + # features = torch.cat([features, features_max[unq_inv, :]], dim=1) + # features = self.linear2(features) + # 
features = torch_scatter.scatter_max(features, unq_inv, dim=0)[0] + + # generate voxel coordinates + unq_coords = unq_coords.int() + voxel_coords = torch.stack((unq_coords // self.scale_xy, + (unq_coords % self.scale_xy) // self.scale_y, + unq_coords % self.scale_y, + torch.zeros(unq_coords.shape[0]).to(unq_coords.device).int() + ), dim=1) + voxel_coords = voxel_coords[:, [0, 3, 2, 1]] + + batch_dict['voxel_features'] = batch_dict['pillar_features'] = features + batch_dict['voxel_coords'] = voxel_coords + return batch_dict + + +class DynamicPillarVFESimple2D(VFETemplate): + def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs): + super().__init__(model_cfg=model_cfg) + + self.use_norm = self.model_cfg.USE_NORM + self.with_distance = self.model_cfg.WITH_DISTANCE + self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ + # self.use_cluster_xyz = self.model_cfg.get('USE_CLUSTER_XYZ', True) + if self.use_absolute_xyz: + num_point_features += 3 + # if self.use_cluster_xyz: + # num_point_features += 3 + if self.with_distance: + num_point_features += 1 + + self.num_filters = self.model_cfg.NUM_FILTERS + assert len(self.num_filters) > 0 + num_filters = [num_point_features] + list(self.num_filters) + + pfn_layers = [] + for i in range(len(num_filters) - 1): + in_filters = num_filters[i] + out_filters = num_filters[i + 1] + pfn_layers.append( + PFNLayerV2(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2)) + ) + self.pfn_layers = nn.ModuleList(pfn_layers) + + self.voxel_x = voxel_size[0] + self.voxel_y = voxel_size[1] + self.voxel_z = voxel_size[2] + self.x_offset = self.voxel_x / 2 + point_cloud_range[0] + self.y_offset = self.voxel_y / 2 + point_cloud_range[1] + self.z_offset = self.voxel_z / 2 + point_cloud_range[2] + + self.scale_xy = grid_size[0] * grid_size[1] + self.scale_y = grid_size[1] + + self.grid_size = torch.tensor(grid_size[:2]).cuda() + self.voxel_size = torch.tensor(voxel_size).cuda() + self.point_cloud_range = torch.tensor(point_cloud_range).cuda() + + def get_output_feature_dim(self): + return self.num_filters[-1] + + def forward(self, batch_dict, **kwargs): + points = batch_dict['points'] # (batch_idx, x, y, z, i, e) + + points_coords = torch.floor( + (points[:, [1, 2]] - self.point_cloud_range[[0, 1]]) / self.voxel_size[[0, 1]]).int() + mask = ((points_coords >= 0) & (points_coords < self.grid_size[[0, 1]])).all(dim=1) + points = points[mask] + points_coords = points_coords[mask] + points_xyz = points[:, [1, 2, 3]].contiguous() + + merge_coords = points[:, 0].int() * self.scale_xy + \ + points_coords[:, 0] * self.scale_y + \ + points_coords[:, 1] + + unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True, dim=0) + + f_center = torch.zeros_like(points_xyz) + f_center[:, 0] = points_xyz[:, 0] - (points_coords[:, 0].to(points_xyz.dtype) * self.voxel_x + self.x_offset) + f_center[:, 1] = points_xyz[:, 1] - (points_coords[:, 1].to(points_xyz.dtype) * self.voxel_y + self.y_offset) + f_center[:, 2] = points_xyz[:, 2] - self.z_offset + + features = [f_center] + if self.use_absolute_xyz: + features.append(points[:, 1:]) + else: + features.append(points[:, 4:]) + + # if self.use_cluster_xyz: + # points_mean = torch_scatter.scatter_mean(points_xyz, unq_inv, dim=0) + # f_cluster = points_xyz - points_mean[unq_inv, :] + # features.append(f_cluster) + + if self.with_distance: + points_dist = torch.norm(points[:, 1:4], 2, dim=1, keepdim=True) + features.append(points_dist) + 
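# Added note (illustrative, not part of the original patch): every point is + # decorated with f_center, its offset from the centre of its own pillar. + # For example, assuming voxel_x = 0.32 and x_offset = 0.16, a point at x = 10.0 + # falls into pillar floor(10.0 / 0.32) = 31 whose centre is 31 * 0.32 + 0.16 = 10.08, + # so f_center[..., 0] = 10.0 - 10.08 = -0.08 before the concatenation below. +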
features = torch.cat(features, dim=-1) + + for pfn in self.pfn_layers: + features = pfn(features, unq_inv) + + # generate voxel coordinates + unq_coords = unq_coords.int() + pillar_coords = torch.stack((unq_coords // self.scale_xy, + (unq_coords % self.scale_xy) // self.scale_y, + unq_coords % self.scale_y, + ), dim=1) + pillar_coords = pillar_coords[:, [0, 2, 1]] + + batch_dict['pillar_features'] = features + batch_dict['pillar_coords'] = pillar_coords + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_voxel_vfe.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_voxel_vfe.py new file mode 100644 index 000000000..d878d491f --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/dynamic_voxel_vfe.py @@ -0,0 +1,106 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + import torch_scatter +except Exception as e: + # Incase someone doesn't want to use dynamic pillar vfe and hasn't installed torch_scatter + pass + +from .vfe_template import VFETemplate +from .dynamic_pillar_vfe import PFNLayerV2 + + +class DynamicVoxelVFE(VFETemplate): + def __init__(self, model_cfg, num_point_features, voxel_size, grid_size, point_cloud_range, **kwargs): + super().__init__(model_cfg=model_cfg) + + self.use_norm = self.model_cfg.USE_NORM + self.with_distance = self.model_cfg.WITH_DISTANCE + self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ + num_point_features += 6 if self.use_absolute_xyz else 3 + if self.with_distance: + num_point_features += 1 + + self.num_filters = self.model_cfg.NUM_FILTERS + assert len(self.num_filters) > 0 + num_filters = [num_point_features] + list(self.num_filters) + + pfn_layers = [] + for i in range(len(num_filters) - 1): + in_filters = num_filters[i] + out_filters = num_filters[i + 1] + pfn_layers.append( + PFNLayerV2(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2)) + ) + self.pfn_layers = nn.ModuleList(pfn_layers) + + self.voxel_x = voxel_size[0] + self.voxel_y = voxel_size[1] + self.voxel_z = voxel_size[2] + self.x_offset = self.voxel_x / 2 + point_cloud_range[0] + self.y_offset = self.voxel_y / 2 + point_cloud_range[1] + self.z_offset = self.voxel_z / 2 + point_cloud_range[2] + + self.scale_xyz = grid_size[0] * grid_size[1] * grid_size[2] + self.scale_yz = grid_size[1] * grid_size[2] + self.scale_z = grid_size[2] + + self.grid_size = torch.tensor(grid_size).cuda() + self.voxel_size = torch.tensor(voxel_size).cuda() + self.point_cloud_range = torch.tensor(point_cloud_range).cuda() + + def get_output_feature_dim(self): + return self.num_filters[-1] + + def forward(self, batch_dict, **kwargs): + points = batch_dict['points'] # (batch_idx, x, y, z, i, e) + + points_coords = torch.floor((points[:, [1,2,3]] - self.point_cloud_range[[0,1,2]]) / self.voxel_size[[0,1,2]]).int() + mask = ((points_coords >= 0) & (points_coords < self.grid_size[[0,1,2]])).all(dim=1) + points = points[mask] + points_coords = points_coords[mask] + points_xyz = points[:, [1, 2, 3]].contiguous() + + merge_coords = points[:, 0].int() * self.scale_xyz + \ + points_coords[:, 0] * self.scale_yz + \ + points_coords[:, 1] * self.scale_z + \ + points_coords[:, 2] + + unq_coords, unq_inv, unq_cnt = torch.unique(merge_coords, return_inverse=True, return_counts=True, dim=0) + + points_mean = torch_scatter.scatter_mean(points_xyz, unq_inv, dim=0) + f_cluster = points_xyz - points_mean[unq_inv, :] + + f_center = torch.zeros_like(points_xyz) + f_center[:, 0] = points_xyz[:, 0] - (points_coords[:, 
0].to(points_xyz.dtype) * self.voxel_x + self.x_offset) + f_center[:, 1] = points_xyz[:, 1] - (points_coords[:, 1].to(points_xyz.dtype) * self.voxel_y + self.y_offset) + # f_center[:, 2] = points_xyz[:, 2] - self.z_offset + f_center[:, 2] = points_xyz[:, 2] - (points_coords[:, 2].to(points_xyz.dtype) * self.voxel_z + self.z_offset) + + if self.use_absolute_xyz: + features = [points[:, 1:], f_cluster, f_center] + else: + features = [points[:, 4:], f_cluster, f_center] + + if self.with_distance: + points_dist = torch.norm(points[:, 1:4], 2, dim=1, keepdim=True) + features.append(points_dist) + features = torch.cat(features, dim=-1) + + for pfn in self.pfn_layers: + features = pfn(features, unq_inv) + + # generate voxel coordinates + unq_coords = unq_coords.int() + voxel_coords = torch.stack((unq_coords // self.scale_xyz, + (unq_coords % self.scale_xyz) // self.scale_yz, + (unq_coords % self.scale_yz) // self.scale_z, + unq_coords % self.scale_z), dim=1) + voxel_coords = voxel_coords[:, [0, 3, 2, 1]] + + batch_dict['pillar_features'] = batch_dict['voxel_features'] = features + batch_dict['voxel_coords'] = voxel_coords + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe.py new file mode 100644 index 000000000..f6269869b --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe.py @@ -0,0 +1,85 @@ +import torch + +from .vfe_template import VFETemplate +from .image_vfe_modules import ffn, f2v + + +class ImageVFE(VFETemplate): + def __init__(self, model_cfg, grid_size, point_cloud_range, depth_downsample_factor, **kwargs): + super().__init__(model_cfg=model_cfg) + self.grid_size = grid_size + self.pc_range = point_cloud_range + self.downsample_factor = depth_downsample_factor + self.module_topology = [ + 'ffn', 'f2v' + ] + self.build_modules() + + def build_modules(self): + """ + Builds modules + """ + for module_name in self.module_topology: + module = getattr(self, 'build_%s' % module_name)() + self.add_module(module_name, module) + + def build_ffn(self): + """ + Builds frustum feature network + Returns: + ffn_module: nn.Module, Frustum feature network + """ + ffn_module = ffn.__all__[self.model_cfg.FFN.NAME]( + model_cfg=self.model_cfg.FFN, + downsample_factor=self.downsample_factor + ) + self.disc_cfg = ffn_module.disc_cfg + return ffn_module + + def build_f2v(self): + """ + Builds frustum to voxel transformation + Returns: + f2v_module: nn.Module, Frustum to voxel transformation + """ + f2v_module = f2v.__all__[self.model_cfg.F2V.NAME]( + model_cfg=self.model_cfg.F2V, + grid_size=self.grid_size, + pc_range=self.pc_range, + disc_cfg=self.disc_cfg + ) + return f2v_module + + def get_output_feature_dim(self): + """ + Gets number of output channels + Returns: + out_feature_dim: int, Number of output channels + """ + out_feature_dim = self.ffn.get_output_feature_dim() + return out_feature_dim + + def forward(self, batch_dict, **kwargs): + """ + Args: + batch_dict: + images: (N, 3, H_in, W_in), Input images + **kwargs: + Returns: + batch_dict: + voxel_features: (B, C, Z, Y, X), Image voxel features + """ + batch_dict = self.ffn(batch_dict) + batch_dict = self.f2v(batch_dict) + return batch_dict + + def get_loss(self): + """ + Gets DDN loss + Returns: + loss: (1), Depth distribution network loss + tb_dict: dict[float], All losses to log in tensorboard + """ + + loss, tb_dict = self.ffn.get_loss() + return loss, tb_dict diff --git 
a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/__init__.py new file mode 100644 index 000000000..58b43bb65 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/__init__.py @@ -0,0 +1,5 @@ +from .frustum_to_voxel import FrustumToVoxel + +__all__ = { + 'FrustumToVoxel': FrustumToVoxel +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py new file mode 100644 index 000000000..c4d6d9822 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_grid_generator.py @@ -0,0 +1,145 @@ +import torch +import torch.nn as nn + +try: + from kornia.utils.grid import create_meshgrid3d + from kornia.geometry.linalg import transform_points +except Exception as e: + # Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions. + # print('Warning: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.') + pass + +from pcdet.utils import transform_utils + + +class FrustumGridGenerator(nn.Module): + + def __init__(self, grid_size, pc_range, disc_cfg): + """ + Initializes Grid Generator for frustum features + Args: + grid_size: [X, Y, Z], Voxel grid size + pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m) + disc_cfg: EasyDict, Depth discretiziation configuration + """ + super().__init__() + try: + import kornia + except Exception as e: + # Note: Kornia team will fix this import issue to try to allow the usage of lower torch versions. + print('Error: kornia is not installed correctly, please ignore this warning if you do not use CaDDN. 
' + 'Otherwise, it is recommended to use torch version greater than 1.2 to use kornia properly.') + exit(-1) + + self.dtype = torch.float32 + self.grid_size = torch.as_tensor(grid_size, dtype=self.dtype) + self.pc_range = pc_range + self.out_of_bounds_val = -2 + self.disc_cfg = disc_cfg + + # Calculate voxel size + pc_range = torch.as_tensor(pc_range).reshape(2, 3) + self.pc_min = pc_range[0] + self.pc_max = pc_range[1] + self.voxel_size = (self.pc_max - self.pc_min) / self.grid_size + + # Create voxel grid + self.depth, self.width, self.height = self.grid_size.int() + self.voxel_grid = create_meshgrid3d(depth=self.depth, + height=self.height, + width=self.width, + normalized_coordinates=False) + + self.voxel_grid = self.voxel_grid.permute(0, 1, 3, 2, 4) # XZY-> XYZ + + # Add offsets to center of voxel + self.voxel_grid += 0.5 + self.grid_to_lidar = self.grid_to_lidar_unproject(pc_min=self.pc_min, + voxel_size=self.voxel_size) + + def grid_to_lidar_unproject(self, pc_min, voxel_size): + """ + Calculate grid to LiDAR unprojection for each plane + Args: + pc_min: [x_min, y_min, z_min], Minimum of point cloud range (m) + voxel_size: [x, y, z], Size of each voxel (m) + Returns: + unproject: (4, 4), Voxel grid to LiDAR unprojection matrix + """ + x_size, y_size, z_size = voxel_size + x_min, y_min, z_min = pc_min + unproject = torch.tensor([[x_size, 0, 0, x_min], + [0, y_size, 0, y_min], + [0, 0, z_size, z_min], + [0, 0, 0, 1]], + dtype=self.dtype) # (4, 4) + + return unproject + + def transform_grid(self, voxel_grid, grid_to_lidar, lidar_to_cam, cam_to_img): + """ + Transforms voxel sampling grid into frustum sampling grid + Args: + grid: (B, X, Y, Z, 3), Voxel sampling grid + grid_to_lidar: (4, 4), Voxel grid to LiDAR unprojection matrix + lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation + cam_to_img: (B, 3, 4), Camera projection matrix + Returns: + frustum_grid: (B, X, Y, Z, 3), Frustum sampling grid + """ + B = lidar_to_cam.shape[0] + + # Create transformation matricies + V_G = grid_to_lidar # Voxel Grid -> LiDAR (4, 4) + C_V = lidar_to_cam # LiDAR -> Camera (B, 4, 4) + I_C = cam_to_img # Camera -> Image (B, 3, 4) + trans = C_V @ V_G + + # Reshape to match dimensions + trans = trans.reshape(B, 1, 1, 4, 4) + voxel_grid = voxel_grid.repeat_interleave(repeats=B, dim=0) + + # Transform to camera frame + camera_grid = transform_points(trans_01=trans, points_1=voxel_grid) + + # Project to image + I_C = I_C.reshape(B, 1, 1, 3, 4) + image_grid, image_depths = transform_utils.project_to_image(project=I_C, points=camera_grid) + + # Convert depths to depth bins + image_depths = transform_utils.bin_depths(depth_map=image_depths, **self.disc_cfg) + + # Stack to form frustum grid + image_depths = image_depths.unsqueeze(-1) + frustum_grid = torch.cat((image_grid, image_depths), dim=-1) + return frustum_grid + + def forward(self, lidar_to_cam, cam_to_img, image_shape): + """ + Generates sampling grid for frustum features + Args: + lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation + cam_to_img: (B, 3, 4), Camera projection matrix + image_shape: (B, 2), Image shape [H, W] + Returns: + frustum_grid (B, X, Y, Z, 3), Sampling grids for frustum features + """ + + frustum_grid = self.transform_grid(voxel_grid=self.voxel_grid.to(lidar_to_cam.device), + grid_to_lidar=self.grid_to_lidar.to(lidar_to_cam.device), + lidar_to_cam=lidar_to_cam, + cam_to_img=cam_to_img) + + # Normalize grid + image_shape, _ = torch.max(image_shape, dim=0) + image_depth = torch.tensor([self.disc_cfg["num_bins"]], + 
device=image_shape.device, + dtype=image_shape.dtype) + frustum_shape = torch.cat((image_depth, image_shape)) + frustum_grid = transform_utils.normalize_coords(coords=frustum_grid, shape=frustum_shape) + + # Replace any NaNs or infinites with out of bounds + mask = ~torch.isfinite(frustum_grid) + frustum_grid[mask] = self.out_of_bounds_val + + return frustum_grid diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py new file mode 100644 index 000000000..a1a66b5e5 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/frustum_to_voxel.py @@ -0,0 +1,54 @@ +import torch +import torch.nn as nn + +from .frustum_grid_generator import FrustumGridGenerator +from .sampler import Sampler + + +class FrustumToVoxel(nn.Module): + + def __init__(self, model_cfg, grid_size, pc_range, disc_cfg): + """ + Initializes module to transform frustum features to voxel features via 3D transformation and sampling + Args: + model_cfg: EasyDict, Module configuration + grid_size: [X, Y, Z], Voxel grid size + pc_range: [x_min, y_min, z_min, x_max, y_max, z_max], Voxelization point cloud range (m) + disc_cfg: EasyDict, Depth discretiziation configuration + """ + super().__init__() + self.model_cfg = model_cfg + self.grid_size = grid_size + self.pc_range = pc_range + self.disc_cfg = disc_cfg + self.grid_generator = FrustumGridGenerator(grid_size=grid_size, + pc_range=pc_range, + disc_cfg=disc_cfg) + self.sampler = Sampler(**model_cfg.SAMPLER) + + def forward(self, batch_dict): + """ + Generates voxel features via 3D transformation and sampling + Args: + batch_dict: + frustum_features: (B, C, D, H_image, W_image), Image frustum features + lidar_to_cam: (B, 4, 4), LiDAR to camera frame transformation + cam_to_img: (B, 3, 4), Camera projection matrix + image_shape: (B, 2), Image shape [H, W] + Returns: + batch_dict: + voxel_features: (B, C, Z, Y, X), Image voxel features + """ + # Generate sampling grid for frustum volume + grid = self.grid_generator(lidar_to_cam=batch_dict["trans_lidar_to_cam"], + cam_to_img=batch_dict["trans_cam_to_img"], + image_shape=batch_dict["image_shape"]) # (B, X, Y, Z, 3) + + # Sample frustum volume to generate voxel volume + voxel_features = self.sampler(input_features=batch_dict["frustum_features"], + grid=grid) # (B, C, X, Y, Z) + + # (B, C, X, Y, Z) -> (B, C, Z, Y, X) + voxel_features = voxel_features.permute(0, 1, 4, 3, 2) + batch_dict["voxel_features"] = voxel_features + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py new file mode 100644 index 000000000..9ce51f450 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/f2v/sampler.py @@ -0,0 +1,37 @@ +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Sampler(nn.Module): + + def __init__(self, mode="bilinear", padding_mode="zeros"): + """ + Initializes module + Args: + mode: string, Sampling mode [bilinear/nearest] + padding_mode: string, Padding mode for outside grid values [zeros/border/reflection] + """ + super().__init__() + self.mode = mode + self.padding_mode = padding_mode + + if torch.__version__ >= '1.3': + self.grid_sample = partial(F.grid_sample, align_corners=True) + else: + self.grid_sample = F.grid_sample + + def 
forward(self, input_features, grid): + """ + Samples input using sampling grid + Args: + input_features: (B, C, D, H, W), Input frustum features + grid: (B, X, Y, Z, 3), Sampling grids for input features + Returns + output_features: (B, C, X, Y, Z) Output voxel features + """ + # Sample from grid + output = self.grid_sample(input=input_features, grid=grid, mode=self.mode, padding_mode=self.padding_mode) + return output diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/__init__.py new file mode 100644 index 000000000..576d5ebd6 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/__init__.py @@ -0,0 +1,5 @@ +from .depth_ffn import DepthFFN + +__all__ = { + 'DepthFFN': DepthFFN +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/__init__.py new file mode 100644 index 000000000..acbaf1060 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/__init__.py @@ -0,0 +1,5 @@ +from .ddn_deeplabv3 import DDNDeepLabV3 + +__all__ = { + 'DDNDeepLabV3': DDNDeepLabV3 +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py new file mode 100644 index 000000000..76be8ca62 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_deeplabv3.py @@ -0,0 +1,24 @@ +from .ddn_template import DDNTemplate + +try: + import torchvision +except: + pass + + +class DDNDeepLabV3(DDNTemplate): + + def __init__(self, backbone_name, **kwargs): + """ + Initializes DDNDeepLabV3 model + Args: + backbone_name: string, ResNet Backbone Name [ResNet50/ResNet101] + """ + if backbone_name == "ResNet50": + constructor = torchvision.models.segmentation.deeplabv3_resnet50 + elif backbone_name == "ResNet101": + constructor = torchvision.models.segmentation.deeplabv3_resnet101 + else: + raise NotImplementedError + + super().__init__(constructor=constructor, **kwargs) diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py new file mode 100644 index 000000000..be110d38d --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn/ddn_template.py @@ -0,0 +1,162 @@ +from collections import OrderedDict +from pathlib import Path +from torch import hub + +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + from kornia.enhance.normalize import normalize +except: + pass + # print('Warning: kornia is not installed. This package is only required by CaDDN') + + +class DDNTemplate(nn.Module): + + def __init__(self, constructor, feat_extract_layer, num_classes, pretrained_path=None, aux_loss=None): + """ + Initializes depth distribution network. 
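+ The model wraps a torchvision DeepLabV3 network and returns both an + intermediate backbone feature map and per-pixel depth-bin logits; num_classes + is the number of depth bins plus one catch-all bin for out-of-range depths.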
+ Args: + constructor: function, Model constructor + feat_extract_layer: string, Layer to extract features from + num_classes: int, Number of classes + pretrained_path: string, (Optional) Path of the model to load weights from + aux_loss: bool, Flag to include auxiliary loss + """ + super().__init__() + self.num_classes = num_classes + self.pretrained_path = pretrained_path + self.pretrained = pretrained_path is not None + self.aux_loss = aux_loss + + if self.pretrained: + # Preprocess Module + self.norm_mean = torch.Tensor([0.485, 0.456, 0.406]) + self.norm_std = torch.Tensor([0.229, 0.224, 0.225]) + + # Model + self.model = self.get_model(constructor=constructor) + self.feat_extract_layer = feat_extract_layer + self.model.backbone.return_layers = { + feat_extract_layer: 'features', + **self.model.backbone.return_layers + } + + def get_model(self, constructor): + """ + Get model + Args: + constructor: function, Model constructor + Returns: + model: nn.Module, Model + """ + # Get model + model = constructor(pretrained=False, + pretrained_backbone=False, + num_classes=self.num_classes, + aux_loss=self.aux_loss) + + # Update weights + if self.pretrained_path is not None: + model_dict = model.state_dict() + + # Download pretrained model if not available yet + checkpoint_path = Path(self.pretrained_path) + if not checkpoint_path.exists(): + checkpoint = checkpoint_path.name + save_dir = checkpoint_path.parent + save_dir.mkdir(parents=True, exist_ok=True) + url = f'https://download.pytorch.org/models/{checkpoint}' + hub.load_state_dict_from_url(url, save_dir) + + # Get pretrained state dict + pretrained_dict = torch.load(self.pretrained_path) + pretrained_dict = self.filter_pretrained_dict(model_dict=model_dict, + pretrained_dict=pretrained_dict) + + # Update current model state dict + model_dict.update(pretrained_dict) + model.load_state_dict(model_dict) + + return model + + def filter_pretrained_dict(self, model_dict, pretrained_dict): + """ + Removes layers from pretrained state dict that are not used or changed in model + Args: + model_dict: dict, Default model state dictionary + pretrained_dict: dict, Pretrained model state dictionary + Returns: + pretrained_dict: dict, Pretrained model state dictionary with removed weights + """ + # Removes aux classifier weights if not used + if "aux_classifier.0.weight" in pretrained_dict and "aux_classifier.0.weight" not in model_dict: + pretrained_dict = {key: value for key, value in pretrained_dict.items() + if "aux_classifier" not in key} + + # Removes final conv layer from weights if the number of classes differs + model_num_classes = model_dict["classifier.4.weight"].shape[0] + pretrained_num_classes = pretrained_dict["classifier.4.weight"].shape[0] + if model_num_classes != pretrained_num_classes: + pretrained_dict.pop("classifier.4.weight") + pretrained_dict.pop("classifier.4.bias") + + return pretrained_dict + + def forward(self, images): + """ + Forward pass + Args: + images: (N, 3, H_in, W_in), Input images + Returns: + result: dict[torch.Tensor], Depth distribution result + features: (N, C, H_out, W_out), Image features + logits: (N, num_classes, H_out, W_out), Classification logits + aux: (N, num_classes, H_out, W_out), Auxiliary classification logits + """ + # Preprocess images + x = self.preprocess(images) + + # Extract features + result = OrderedDict() + features = self.model.backbone(x) + result['features'] = features['features'] + feat_shape = features['features'].shape[-2:] + + # Predict classification logits + x = features["out"] + x = 
self.model.classifier(x) + x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False) + result["logits"] = x + + # Prediction auxillary classification logits + if self.model.aux_classifier is not None: + x = features["aux"] + x = self.model.aux_classifier(x) + x = F.interpolate(x, size=feat_shape, mode='bilinear', align_corners=False) + result["aux"] = x + + return result + + def preprocess(self, images): + """ + Preprocess images + Args: + images: (N, 3, H, W), Input images + Return + x: (N, 3, H, W), Preprocessed images + """ + x = images + if self.pretrained: + # Create a mask for padded pixels + mask = (x == 0) + + # Match ResNet pretrained preprocessing + x = normalize(x, mean=self.norm_mean, std=self.norm_std) + + # Make padded pixels = 0 + x[mask] = 0 + + return x diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/__init__.py new file mode 100644 index 000000000..0ff8b2b5b --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/__init__.py @@ -0,0 +1,5 @@ +from .ddn_loss import DDNLoss + +__all__ = { + "DDNLoss": DDNLoss +} diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py new file mode 100644 index 000000000..47bf8d486 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/balancer.py @@ -0,0 +1,50 @@ +import torch +import torch.nn as nn + +from pcdet.utils import loss_utils + + +class Balancer(nn.Module): + def __init__(self, fg_weight, bg_weight, downsample_factor=1): + """ + Initialize fixed foreground/background loss balancer + Args: + fg_weight: float, Foreground loss weight + bg_weight: float, Background loss weight + downsample_factor: int, Depth map downsample factor + """ + super().__init__() + self.fg_weight = fg_weight + self.bg_weight = bg_weight + self.downsample_factor = downsample_factor + + def forward(self, loss, gt_boxes2d): + """ + Forward pass + Args: + loss: (B, H, W), Pixel-wise loss + gt_boxes2d: (B, N, 4), 2D box labels for foreground/background balancing + Returns: + loss: (1), Total loss after foreground/background balancing + tb_dict: dict[float], All losses to log in tensorboard + """ + # Compute masks + fg_mask = loss_utils.compute_fg_mask(gt_boxes2d=gt_boxes2d, + shape=loss.shape, + downsample_factor=self.downsample_factor, + device=loss.device) + bg_mask = ~fg_mask + + # Compute balancing weights + weights = self.fg_weight * fg_mask + self.bg_weight * bg_mask + num_pixels = fg_mask.sum() + bg_mask.sum() + + # Compute losses + loss *= weights + fg_loss = loss[fg_mask].sum() / num_pixels + bg_loss = loss[bg_mask].sum() / num_pixels + + # Get total loss + loss = fg_loss + bg_loss + tb_dict = {"balancer_loss": loss.item(), "fg_loss": fg_loss.item(), "bg_loss": bg_loss.item()} + return loss, tb_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py new file mode 100644 index 000000000..f59c11f57 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/ddn_loss/ddn_loss.py @@ -0,0 +1,75 @@ +import torch +import torch.nn as nn + + +from .balancer import Balancer +from pcdet.utils import transform_utils + 
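# Added context (hedged): transform_utils.bin_depths(..., target=True) turns the + # metric depth map into integer bin indices that FocalLoss can consume, with the + # last of the D + 1 bins reserved for depths beyond the configured range.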
+try: + from kornia.losses.focal import FocalLoss +except: + pass + # print('Warning: kornia is not installed. This package is only required by CaDDN') + + +class DDNLoss(nn.Module): + + def __init__(self, + weight, + alpha, + gamma, + disc_cfg, + fg_weight, + bg_weight, + downsample_factor): + """ + Initializes DDNLoss module + Args: + weight: float, Loss function weight + alpha: float, Alpha value for Focal Loss + gamma: float, Gamma value for Focal Loss + disc_cfg: dict, Depth discretiziation configuration + fg_weight: float, Foreground loss weight + bg_weight: float, Background loss weight + downsample_factor: int, Depth map downsample factor + """ + super().__init__() + self.device = torch.cuda.current_device() + self.disc_cfg = disc_cfg + self.balancer = Balancer(downsample_factor=downsample_factor, + fg_weight=fg_weight, + bg_weight=bg_weight) + + # Set loss function + self.alpha = alpha + self.gamma = gamma + self.loss_func = FocalLoss(alpha=self.alpha, gamma=self.gamma, reduction="none") + self.weight = weight + + def forward(self, depth_logits, depth_maps, gt_boxes2d): + """ + Gets DDN loss + Args: + depth_logits: (B, D+1, H, W), Predicted depth logits + depth_maps: (B, H, W), Depth map [m] + gt_boxes2d: torch.Tensor (B, N, 4), 2D box labels for foreground/background balancing + Returns: + loss: (1), Depth distribution network loss + tb_dict: dict[float], All losses to log in tensorboard + """ + tb_dict = {} + + # Bin depth map to create target + depth_target = transform_utils.bin_depths(depth_maps, **self.disc_cfg, target=True) + + # Compute loss + loss = self.loss_func(depth_logits, depth_target) + + # Compute foreground/background balancing + loss, tb_dict = self.balancer(loss=loss, gt_boxes2d=gt_boxes2d) + + # Final loss + loss *= self.weight + tb_dict.update({"ddn_loss": loss.item()}) + + return loss, tb_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py new file mode 100644 index 000000000..c35e22927 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/image_vfe_modules/ffn/depth_ffn.py @@ -0,0 +1,103 @@ +import torch.nn as nn +import torch.nn.functional as F + +from . 
import ddn, ddn_loss +from pcdet.models.model_utils.basic_block_2d import BasicBlock2D + + +class DepthFFN(nn.Module): + + def __init__(self, model_cfg, downsample_factor): + """ + Initialize frustum feature network via depth distribution estimation + Args: + model_cfg: EasyDict, Depth classification network config + downsample_factor: int, Depth map downsample factor + """ + super().__init__() + self.model_cfg = model_cfg + self.disc_cfg = model_cfg.DISCRETIZE + self.downsample_factor = downsample_factor + + # Create modules + self.ddn = ddn.__all__[model_cfg.DDN.NAME]( + num_classes=self.disc_cfg["num_bins"] + 1, + backbone_name=model_cfg.DDN.BACKBONE_NAME, + **model_cfg.DDN.ARGS + ) + self.channel_reduce = BasicBlock2D(**model_cfg.CHANNEL_REDUCE) + self.ddn_loss = ddn_loss.__all__[model_cfg.LOSS.NAME]( + disc_cfg=self.disc_cfg, + downsample_factor=downsample_factor, + **model_cfg.LOSS.ARGS + ) + self.forward_ret_dict = {} + + def get_output_feature_dim(self): + return self.channel_reduce.out_channels + + def forward(self, batch_dict): + """ + Predicts depths and creates image depth feature volume using depth distributions + Args: + batch_dict: + images: (N, 3, H_in, W_in), Input images + Returns: + batch_dict: + frustum_features: (N, C, D, H_out, W_out), Image depth features + """ + # Pixel-wise depth classification + images = batch_dict["images"] + ddn_result = self.ddn(images) + image_features = ddn_result["features"] + depth_logits = ddn_result["logits"] + + # Channel reduce + if self.channel_reduce is not None: + image_features = self.channel_reduce(image_features) + + # Create image feature plane-sweep volume + frustum_features = self.create_frustum_features(image_features=image_features, + depth_logits=depth_logits) + batch_dict["frustum_features"] = frustum_features + + if self.training: + self.forward_ret_dict["depth_maps"] = batch_dict["depth_maps"] + self.forward_ret_dict["gt_boxes2d"] = batch_dict["gt_boxes2d"] + self.forward_ret_dict["depth_logits"] = depth_logits + return batch_dict + + def create_frustum_features(self, image_features, depth_logits): + """ + Create image depth feature volume by multiplying image features with depth distributions + Args: + image_features: (N, C, H, W), Image features + depth_logits: (N, D+1, H, W), Depth classification logits + Returns: + frustum_features: (N, C, D, H, W), Image features + """ + channel_dim = 1 + depth_dim = 2 + + # Resize to match dimensions + image_features = image_features.unsqueeze(depth_dim) + depth_logits = depth_logits.unsqueeze(channel_dim) + + # Apply softmax along depth axis and remove last depth category (> Max Range) + depth_probs = F.softmax(depth_logits, dim=depth_dim) + depth_probs = depth_probs[:, :, :-1] + + # Multiply to form image depth feature volume + frustum_features = depth_probs * image_features + return frustum_features + + def get_loss(self): + """ + Gets DDN loss + Args: + Returns: + loss: (1), Depth distribution network loss + tb_dict: dict[float], All losses to log in tensorboard + """ + loss, tb_dict = self.ddn_loss(**self.forward_ret_dict) + return loss, tb_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/mean_vfe.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/mean_vfe.py new file mode 100644 index 000000000..42bd21ff3 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/mean_vfe.py @@ -0,0 +1,31 @@ +import torch + +from .vfe_template import VFETemplate + + +class MeanVFE(VFETemplate): + def __init__(self, model_cfg, num_point_features, **kwargs): + 
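# Added note: this VFE has no learnable parameters; forward() sums the padded + # (num_voxels, max_points_per_voxel, C) tensor over the point axis and divides + # by clamp(voxel_num_points, min=1.0), so zero-padded slots never bias the mean. +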
super().__init__(model_cfg=model_cfg) + self.num_point_features = num_point_features + + def get_output_feature_dim(self): + return self.num_point_features + + def forward(self, batch_dict, **kwargs): + """ + Args: + batch_dict: + voxels: (num_voxels, max_points_per_voxel, C) + voxel_num_points: optional (num_voxels) + **kwargs: + + Returns: + vfe_features: (num_voxels, C) + """ + voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points'] + points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False) + normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features) + points_mean = points_mean / normalizer + batch_dict['voxel_features'] = points_mean.contiguous() + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/pillar_vfe.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/pillar_vfe.py new file mode 100644 index 000000000..a162a83e8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/pillar_vfe.py @@ -0,0 +1,123 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .vfe_template import VFETemplate + + +class PFNLayer(nn.Module): + def __init__(self, + in_channels, + out_channels, + use_norm=True, + last_layer=False): + super().__init__() + + self.last_vfe = last_layer + self.use_norm = use_norm + if not self.last_vfe: + out_channels = out_channels // 2 + + if self.use_norm: + self.linear = nn.Linear(in_channels, out_channels, bias=False) + self.norm = nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01) + else: + self.linear = nn.Linear(in_channels, out_channels, bias=True) + + self.part = 50000 + + def forward(self, inputs): + if inputs.shape[0] > self.part: + # nn.Linear performs randomly when batch size is too large + num_parts = inputs.shape[0] // self.part + part_linear_out = [self.linear(inputs[num_part*self.part:(num_part+1)*self.part]) + for num_part in range(num_parts+1)] + x = torch.cat(part_linear_out, dim=0) + else: + x = self.linear(inputs) + torch.backends.cudnn.enabled = False + x = self.norm(x.permute(0, 2, 1)).permute(0, 2, 1) if self.use_norm else x + torch.backends.cudnn.enabled = True + x = F.relu(x) + x_max = torch.max(x, dim=1, keepdim=True)[0] + + if self.last_vfe: + return x_max + else: + x_repeat = x_max.repeat(1, inputs.shape[1], 1) + x_concatenated = torch.cat([x, x_repeat], dim=2) + return x_concatenated + + +class PillarVFE(VFETemplate): + def __init__(self, model_cfg, num_point_features, voxel_size, point_cloud_range, **kwargs): + super().__init__(model_cfg=model_cfg) + + self.use_norm = self.model_cfg.USE_NORM + self.with_distance = self.model_cfg.WITH_DISTANCE + self.use_absolute_xyz = self.model_cfg.USE_ABSLOTE_XYZ + num_point_features += 6 if self.use_absolute_xyz else 3 + if self.with_distance: + num_point_features += 1 + + self.num_filters = self.model_cfg.NUM_FILTERS + assert len(self.num_filters) > 0 + num_filters = [num_point_features] + list(self.num_filters) + + pfn_layers = [] + for i in range(len(num_filters) - 1): + in_filters = num_filters[i] + out_filters = num_filters[i + 1] + pfn_layers.append( + PFNLayer(in_filters, out_filters, self.use_norm, last_layer=(i >= len(num_filters) - 2)) + ) + self.pfn_layers = nn.ModuleList(pfn_layers) + + self.voxel_x = voxel_size[0] + self.voxel_y = voxel_size[1] + self.voxel_z = voxel_size[2] + self.x_offset = self.voxel_x / 2 + point_cloud_range[0] + self.y_offset = self.voxel_y / 2 + point_cloud_range[1] + self.z_offset = self.voxel_z / 2 + point_cloud_range[2] + + def 
get_output_feature_dim(self): + return self.num_filters[-1] + + def get_paddings_indicator(self, actual_num, max_num, axis=0): + actual_num = torch.unsqueeze(actual_num, axis + 1) + max_num_shape = [1] * len(actual_num.shape) + max_num_shape[axis + 1] = -1 + max_num = torch.arange(max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape) + paddings_indicator = actual_num.int() > max_num + return paddings_indicator + + def forward(self, batch_dict, **kwargs): + + voxel_features, voxel_num_points, coords = batch_dict['voxels'], batch_dict['voxel_num_points'], batch_dict['voxel_coords'] + points_mean = voxel_features[:, :, :3].sum(dim=1, keepdim=True) / voxel_num_points.type_as(voxel_features).view(-1, 1, 1) + f_cluster = voxel_features[:, :, :3] - points_mean + + f_center = torch.zeros_like(voxel_features[:, :, :3]) + f_center[:, :, 0] = voxel_features[:, :, 0] - (coords[:, 3].to(voxel_features.dtype).unsqueeze(1) * self.voxel_x + self.x_offset) + f_center[:, :, 1] = voxel_features[:, :, 1] - (coords[:, 2].to(voxel_features.dtype).unsqueeze(1) * self.voxel_y + self.y_offset) + f_center[:, :, 2] = voxel_features[:, :, 2] - (coords[:, 1].to(voxel_features.dtype).unsqueeze(1) * self.voxel_z + self.z_offset) + + if self.use_absolute_xyz: + features = [voxel_features, f_cluster, f_center] + else: + features = [voxel_features[..., 3:], f_cluster, f_center] + + if self.with_distance: + points_dist = torch.norm(voxel_features[:, :, :3], 2, 2, keepdim=True) + features.append(points_dist) + features = torch.cat(features, dim=-1) + + voxel_count = features.shape[1] + mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0) + mask = torch.unsqueeze(mask, -1).type_as(voxel_features) + features *= mask + for pfn in self.pfn_layers: + features = pfn(features) + features = features.squeeze() + batch_dict['pillar_features'] = features + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/vfe_template.py b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/vfe_template.py new file mode 100644 index 000000000..a862e3e54 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_3d/vfe/vfe_template.py @@ -0,0 +1,22 @@ +import torch.nn as nn + + +class VFETemplate(nn.Module): + def __init__(self, model_cfg, **kwargs): + super().__init__() + self.model_cfg = model_cfg + + def get_output_feature_dim(self): + raise NotImplementedError + + def forward(self, **kwargs): + """ + Args: + **kwargs: + + Returns: + batch_dict: + ... 
+ vfe_features: (num_voxels, C) + """ + raise NotImplementedError diff --git a/toolbox/openpcdet/pcdet/models/backbones_image/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_image/__init__.py new file mode 100644 index 000000000..d9b0e68da --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_image/__init__.py @@ -0,0 +1,4 @@ +from .swin import SwinTransformer +__all__ = { + 'SwinTransformer':SwinTransformer, +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/backbones_image/img_neck/__init__.py b/toolbox/openpcdet/pcdet/models/backbones_image/img_neck/__init__.py new file mode 100644 index 000000000..25e468068 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_image/img_neck/__init__.py @@ -0,0 +1,4 @@ +from .generalized_lss import GeneralizedLSSFPN +__all__ = { + 'GeneralizedLSSFPN':GeneralizedLSSFPN, +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/backbones_image/img_neck/generalized_lss.py b/toolbox/openpcdet/pcdet/models/backbones_image/img_neck/generalized_lss.py new file mode 100644 index 000000000..399b0348b --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_image/img_neck/generalized_lss.py @@ -0,0 +1,76 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from ...model_utils.basic_block_2d import BasicBlock2D + + +class GeneralizedLSSFPN(nn.Module): + """ + This module implements FPN, which creates pyramid features built on top of some input feature maps. + This code is adapted from https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/necks/fpn.py with minimal modifications. + """ + def __init__(self, model_cfg): + super().__init__() + self.model_cfg = model_cfg + in_channels = self.model_cfg.IN_CHANNELS + out_channels = self.model_cfg.OUT_CHANNELS + num_ins = len(in_channels) + num_outs = self.model_cfg.NUM_OUTS + start_level = self.model_cfg.START_LEVEL + end_level = self.model_cfg.END_LEVEL + + self.in_channels = in_channels + + if end_level == -1: + self.backbone_end_level = num_ins - 1 + else: + self.backbone_end_level = end_level + assert end_level <= len(in_channels) + assert num_outs == end_level - start_level + self.start_level = start_level + self.end_level = end_level + + self.lateral_convs = nn.ModuleList() + self.fpn_convs = nn.ModuleList() + + for i in range(self.start_level, self.backbone_end_level): + l_conv = BasicBlock2D( + in_channels[i] + (in_channels[i + 1] if i == self.backbone_end_level - 1 else out_channels), + out_channels, kernel_size=1, bias = False + ) + fpn_conv = BasicBlock2D(out_channels,out_channels, kernel_size=3, padding=1, bias = False) + self.lateral_convs.append(l_conv) + self.fpn_convs.append(fpn_conv) + + def forward(self, batch_dict): + """ + Args: + batch_dict: + image_features (list[tensor]): Multi-stage features from image backbone. + Returns: + batch_dict: + image_fpn (list(tensor)): FPN features. 
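+ Each output level is built top-down: the level above is bilinearly upsampled, + concatenated with the current lateral, reduced by a 1x1 block and refined by + a 3x3 block, matching the upsample -> cat -> conv1x1 -> conv3x3 comment below.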
+ """ + # upsample -> cat -> conv1x1 -> conv3x3 + inputs = batch_dict['image_features'] + assert len(inputs) == len(self.in_channels) + + # build laterals + laterals = [inputs[i + self.start_level] for i in range(len(inputs))] + + # build top-down path + used_backbone_levels = len(laterals) - 1 + for i in range(used_backbone_levels - 1, -1, -1): + x = F.interpolate( + laterals[i + 1], + size=laterals[i].shape[2:], + mode='bilinear', align_corners=False, + ) + laterals[i] = torch.cat([laterals[i], x], dim=1) + laterals[i] = self.lateral_convs[i](laterals[i]) + laterals[i] = self.fpn_convs[i](laterals[i]) + + # build outputs + outs = [laterals[i] for i in range(used_backbone_levels)] + batch_dict['image_fpn'] = tuple(outs) + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/backbones_image/swin.py b/toolbox/openpcdet/pcdet/models/backbones_image/swin.py new file mode 100644 index 000000000..d428c2700 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/backbones_image/swin.py @@ -0,0 +1,736 @@ +# Copyright (c) OpenMMLab. All rights reserved. +""" +Mostly copy-paste from + https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/backbones/swin.py + +""" + +import warnings +from collections import OrderedDict +from copy import deepcopy + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp + +from ..model_utils.swin_utils import swin_converter +from ..model_utils.swin_utils import PatchEmbed, PatchMerging +from ..model_utils.swin_utils import FFN, DropPath, to_2tuple, trunc_normal_, trunc_normal_init, constant_init + + +class WindowMSA(nn.Module): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int]): The height and width of the window. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. 
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0.): + + super().__init__() + self._is_init = False + + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor | None, Optional): mask with shape of (num_windows, + Wh*Ww, Wh*Ww), value should be between (-inf, 0]. + """ + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + # make torchscript happy (cannot use tensor as tuple) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ShiftWindowMSA(nn.Module): + """Shifted Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Defaults: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Defaults: 0. + proj_drop_rate (float, optional): Dropout ratio of output. + Defaults: 0. + dropout_layer (dict, optional): The dropout_layer used before output. 
+ Defaults: dict(type='DropPath', drop_prob=0.). + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0, + proj_drop_rate=0, + dropout_layer=dict(type='DropPath', drop_prob=0.)): + super().__init__() + self._is_init = False + + self.window_size = window_size + self.shift_size = shift_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = WindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(window_size), + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate,) + self.drop = DropPath(dropout_layer['drop_prob']) + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, 'input feature has wrong size' + query = query.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if self.shift_size > 0: + shifted_query = torch.roll( + query, + shifts=(-self.shift_size, -self.shift_size), + dims=(1, 2)) + + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device) + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, + -self.shift_size), slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = self.window_partition(img_mask) + mask_windows = mask_windows.view( + -1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-100.0)).masked_fill( + attn_mask == 0, float(0.0)) + else: + shifted_query = query + attn_mask = None + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(shifted_query) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, self.window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, + shifts=(self.shift_size, self.shift_size), + dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + return x + + def window_reverse(self, windows, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + window_size = self.window_size + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + def window_partition(self, x): + """ + Args: + x: (B, H, W, C) + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + 
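+        # Editor's note: the view/permute below tiles the padded map into
+        # non-overlapping windows and folds the (H/ws, W/ws) tile grid into
+        # the batch axis; window_reverse above is its exact inverse.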
window_size = self.window_size + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + +class SwinBlock(nn.Module): + """" + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + window_size (int, optional): The local window scale. Default: 7. + shift (bool, optional): whether to shift window or not. Default False. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float, optional): Stochastic depth rate. Default: 0. + act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + window_size=7, + shift=False, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False,): + super(SwinBlock, self).__init__() + self._is_init = False + + self.with_cp = with_cp + + self.norm1 = nn.LayerNorm(embed_dims) + self.attn = ShiftWindowMSA( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=window_size, + shift_size=window_size // 2 if shift else 0, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),) + + self.norm2 = nn.LayerNorm(embed_dims) + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=2, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=True,) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(nn.Module): + """Implements one stage in Swin Transformer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + depth (int): The number of blocks in this stage. + window_size (int, optional): The local window scale. Default: 7. + qkv_bias (bool, optional): enable bias for qkv if True. Default: True. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop_rate (float, optional): Dropout rate. Default: 0. + attn_drop_rate (float, optional): Attention dropout rate. Default: 0. + drop_path_rate (float | list[float], optional): Stochastic depth + rate. Default: 0. + downsample (BaseModule | None, optional): The downsample operation + module. Default: None. 
+ act_cfg (dict, optional): The config dict of activation function. + Default: dict(type='GELU'). + norm_cfg (dict, optional): The config dict of normalization. + Default: dict(type='LN'). + with_cp (bool, optional): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + depth, + window_size=7, + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + downsample=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + with_cp=False): + super().__init__() + self._is_init = False + + if isinstance(drop_path_rate, list): + drop_path_rates = drop_path_rate + assert len(drop_path_rates) == depth + else: + drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)] + + self.blocks = nn.ModuleList() + for i in range(depth): + block = SwinBlock( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + window_size=window_size, + shift=False if i % 2 == 0 else True, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rates[i], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + with_cp=with_cp,) + self.blocks.append(block) + + self.downsample = downsample + + def forward(self, x, hw_shape): + for block in self.blocks: + x = block(x, hw_shape) + + if self.downsample: + x_down, down_hw_shape = self.downsample(x, hw_shape) + return x_down, down_hw_shape, x, hw_shape + else: + return x, hw_shape, x, hw_shape + + +class SwinTransformer(nn.Module): + """ Swin Transformer + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/abs/2103.14030 + + This code is adapted from https://github.com/open-mmlab/mmdetection/blob/main/mmdet/models/backbones/swin.py + with minimal modifications. + + Args: + pretrain_img_size (int | tuple[int]): The size of input image when + pretrain. Defaults: 224. + in_channels (int): The num of input channels. + Defaults: 3. + embed_dims (int): The feature dimension. Default: 96. + patch_size (int | tuple[int]): Patch size. Default: 4. + window_size (int): Window size. Default: 7. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Default: 4. + depths (tuple[int]): Depths of each Swin Transformer stage. + Default: (2, 2, 6, 2). + num_heads (tuple[int]): Parallel attention heads of each Swin + Transformer stage. Default: (3, 6, 12, 24). + strides (tuple[int]): The patch merging or patch embedding stride of + each Swin Transformer stage. (In swin, we set kernel size equal to + stride.) Default: (4, 2, 2, 2). + out_indices (tuple[int]): Output from which stages. + Default: (0, 1, 2, 3). + qkv_bias (bool, optional): If True, add a learnable bias to query, key, + value. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + patch_norm (bool): If add a norm layer for patch embed and patch + merging. Default: True. + drop_rate (float): Dropout rate. Defaults: 0. + attn_drop_rate (float): Attention dropout rate. Default: 0. + drop_path_rate (float): Stochastic depth rate. Defaults: 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults: False. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='GELU'). 
+        norm_cfg (dict): Config dict for normalization layer at
+            output of backbone. Defaults: dict(type='LN').
+        with_cp (bool, optional): Use checkpoint or not. Using checkpoint
+            will save some memory while slowing down the training speed.
+            Default: False.
+        pretrained (str, optional): Path to a pretrained model. Default: None.
+        convert_weights (bool): Whether the pre-trained weights come from
+            the original Swin repo; if so, some keys are converted for
+            compatibility.
+            Default: False.
+        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+            Default: -1 (-1 means not freezing any parameters).
+        init_cfg (dict, optional): The config for initialization.
+            Defaults to None.
+    """
+
+    def __init__(self, model_cfg):
+
+        self.model_cfg = model_cfg
+        pretrain_img_size = self.model_cfg.get('PRETRAIN_IMG_SIZE', 224)
+        init_cfg = self.model_cfg.get('INIT_CFG', None)
+        depths = self.model_cfg.DEPTHS
+        in_channels = self.model_cfg.get('IN_CHANNELS', 3)
+        strides = self.model_cfg.get('STRIDES', (4, 2, 2, 2))
+        patch_size = self.model_cfg.get('PATCH_SIZE', 4)
+        embed_dims = self.model_cfg.EMBED_DIMS
+        num_heads = self.model_cfg.NUM_HEADS
+        window_size = self.model_cfg.WINDOW_SIZE
+        mlp_ratio = self.model_cfg.MLP_RATIO
+        qkv_bias = self.model_cfg.get('QKV_BIAS', True)
+        qk_scale = self.model_cfg.get('QK_SCALE', None)
+        drop_rate = self.model_cfg.DROP_RATE
+        attn_drop_rate = self.model_cfg.ATTN_DROP_RATE
+        drop_path_rate = self.model_cfg.DROP_PATH_RATE
+        patch_norm = self.model_cfg.get('PATCH_NORM', True)
+        out_indices = self.model_cfg.get('OUT_INDICES', [0, 1, 2, 3])
+        with_cp = self.model_cfg.get('WITH_CP', False)
+        use_abs_pos_embed = self.model_cfg.get('USE_ABS_POS_EMBED', False)
+        act_cfg = dict(type='GELU')
+        norm_cfg = dict(type='LN')
+
+        self.convert_weights = self.model_cfg.get('CONVERT_WEIGHTS', False)
+        self.frozen_stages = self.model_cfg.get('FROZEN_STAGES', -1)
+
+        if isinstance(pretrain_img_size, int):
+            pretrain_img_size = to_2tuple(pretrain_img_size)
+        elif isinstance(pretrain_img_size, tuple):
+            if len(pretrain_img_size) == 1:
+                pretrain_img_size = to_2tuple(pretrain_img_size[0])
+            assert len(pretrain_img_size) == 2, \
+                f'The size of image should have length 1 or 2, ' \
+                f'but got {len(pretrain_img_size)}'
+
+        super(SwinTransformer, self).__init__()
+        self.init_cfg = init_cfg
+
+        num_layers = len(depths)
+        self.out_indices = out_indices
+        self.use_abs_pos_embed = use_abs_pos_embed
+
+        assert strides[0] == patch_size, 'Use non-overlapping patch embed.'
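+        # Editor's note: with the documented defaults (patch_size=4,
+        # strides=(4, 2, 2, 2), depths=(2, 2, 6, 2), embed_dims=96), an input
+        # of size (H, W) yields four stages at 1/4, 1/8, 1/16 and 1/32
+        # resolution with channel widths embed_dims * 2**i -- the standard
+        # Swin-T pyramid consumed by the FPN-style neck above.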
+
+        self.patch_embed = PatchEmbed(
+            in_channels=in_channels,
+            embed_dims=embed_dims,
+            conv_type='Conv2d',
+            kernel_size=patch_size,
+            stride=strides[0],
+            norm_cfg=norm_cfg if patch_norm else None)
+
+        if self.use_abs_pos_embed:
+            patch_row = pretrain_img_size[0] // patch_size
+            patch_col = pretrain_img_size[1] // patch_size
+            num_patches = patch_row * patch_col
+            self.absolute_pos_embed = nn.Parameter(
+                torch.zeros((1, num_patches, embed_dims)))
+
+        self.drop_after_pos = nn.Dropout(p=drop_rate)
+
+        # set stochastic depth decay rule
+        total_depth = sum(depths)
+        dpr = [
+            x.item() for x in torch.linspace(0, drop_path_rate, total_depth)
+        ]
+
+        self.stages = nn.ModuleList()
+        in_channels = embed_dims
+        for i in range(num_layers):
+            if i < num_layers - 1:
+                downsample = PatchMerging(
+                    in_channels=in_channels,
+                    out_channels=2 * in_channels,
+                    stride=strides[i + 1],
+                    norm_cfg=norm_cfg if patch_norm else None)
+            else:
+                downsample = None
+
+            stage = SwinBlockSequence(
+                embed_dims=in_channels,
+                num_heads=num_heads[i],
+                feedforward_channels=mlp_ratio * in_channels,
+                depth=depths[i],
+                window_size=window_size,
+                qkv_bias=qkv_bias,
+                qk_scale=qk_scale,
+                drop_rate=drop_rate,
+                attn_drop_rate=attn_drop_rate,
+                drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],
+                downsample=downsample,
+                act_cfg=act_cfg,
+                norm_cfg=norm_cfg,
+                with_cp=with_cp)
+            self.stages.append(stage)
+            if downsample:
+                in_channels = downsample.out_channels
+
+        self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]
+        # Add a norm layer for each output
+        for i in out_indices:
+            layer = nn.LayerNorm(self.num_features[i])
+            layer_name = f'norm{i}'
+            self.add_module(layer_name, layer)
+
+    def train(self, mode=True):
+        """Convert the model into training mode while keeping frozen stages frozen."""
+        super(SwinTransformer, self).train(mode)
+        self._freeze_stages()
+
+    def _freeze_stages(self):
+        if self.frozen_stages >= 0:
+            self.patch_embed.eval()
+            for param in self.patch_embed.parameters():
+                param.requires_grad = False
+            if self.use_abs_pos_embed:
+                self.absolute_pos_embed.requires_grad = False
+            self.drop_after_pos.eval()
+
+        for i in range(1, self.frozen_stages + 1):
+            if (i - 1) in self.out_indices:
+                norm_layer = getattr(self, f'norm{i-1}')
+                norm_layer.eval()
+                for param in norm_layer.parameters():
+                    param.requires_grad = False
+
+            m = self.stages[i - 1]
+            m.eval()
+            for param in m.parameters():
+                param.requires_grad = False
+
+    def init_weights(self):
+        if self.init_cfg is None:
+            print(f'No pre-trained weights for '
+                  f'{self.__class__.__name__}, '
+                  f'training starts from scratch')
+            if self.use_abs_pos_embed:
+                trunc_normal_(self.absolute_pos_embed, std=0.02)
+            for m in self.modules():
+                if isinstance(m, nn.Linear):
+                    trunc_normal_init(m, std=.02, bias=0.)
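+                    # Editor's note: truncated-normal Linear weights with the
+                    # constant LayerNorm init below is the usual from-scratch
+                    # initialization for ViT-style backbones.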
+ elif isinstance(m, nn.LayerNorm): + constant_init(m, 1.0) + else: + assert 'checkpoint' in self.init_cfg, f'Only support ' \ + f'specify `Pretrained` in ' \ + f'`init_cfg` in ' \ + f'{self.__class__.__name__} ' + ckpt = torch.load(self.init_cfg.checkpoint, map_location='cpu') + if 'state_dict' in ckpt: + _state_dict = ckpt['state_dict'] + elif 'model' in ckpt: + _state_dict = ckpt['model'] + else: + _state_dict = ckpt + if self.convert_weights: + # supported loading weight from original repo, + _state_dict = swin_converter(_state_dict) + + state_dict = OrderedDict() + for k, v in _state_dict.items(): + if k.startswith('backbone.'): + state_dict[k[9:]] = v + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # reshape absolute position embedding + if state_dict.get('absolute_pos_embed') is not None: + absolute_pos_embed = state_dict['absolute_pos_embed'] + N1, L, C1 = absolute_pos_embed.size() + N2, C2, H, W = self.absolute_pos_embed.size() + if N1 != N2 or C1 != C2 or L != H * W: + print('Error in loading absolute_pos_embed, pass') + else: + state_dict['absolute_pos_embed'] = absolute_pos_embed.view( + N2, H, W, C2).permute(0, 3, 1, 2).contiguous() + + # interpolate position bias table if needed + relative_position_bias_table_keys = [ + k for k in state_dict.keys() + if 'relative_position_bias_table' in k + ] + for table_key in relative_position_bias_table_keys: + table_pretrained = state_dict[table_key] + table_current = self.state_dict()[table_key] + L1, nH1 = table_pretrained.size() + L2, nH2 = table_current.size() + if nH1 != nH2: + print(f'Error in loading {table_key}, pass') + elif L1 != L2: + S1 = int(L1**0.5) + S2 = int(L2**0.5) + table_pretrained_resized = F.interpolate( + table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1), + size=(S2, S2), + mode='bicubic') + state_dict[table_key] = table_pretrained_resized.view( + nH2, L2).permute(1, 0).contiguous() + + # load state_dict + self.load_state_dict(state_dict, False) + + def forward(self, batch_dict): + x = batch_dict['camera_imgs'] + B, N, C, H, W = x.size() + x = x.view(B * N, C, H, W) + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape, out, out_hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(out) + out = out.view(-1, *out_hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + batch_dict['image_features'] = outs + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/__init__.py b/toolbox/openpcdet/pcdet/models/dense_heads/__init__.py new file mode 100644 index 000000000..3bb33de36 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/__init__.py @@ -0,0 +1,21 @@ +from .anchor_head_multi import AnchorHeadMulti +from .anchor_head_single import AnchorHeadSingle +from .anchor_head_template import AnchorHeadTemplate +from .point_head_box import PointHeadBox +from .point_head_simple import PointHeadSimple +from .point_intra_part_head import PointIntraPartOffsetHead +from .center_head import CenterHead +from .voxelnext_head import VoxelNeXtHead +from .transfusion_head import TransFusionHead + +__all__ = { + 'AnchorHeadTemplate': AnchorHeadTemplate, + 'AnchorHeadSingle': AnchorHeadSingle, + 'PointIntraPartOffsetHead': PointIntraPartOffsetHead, + 
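+    # Editor's note: despite its name, this dict is not a conventional
+    # __all__ list; it acts as a NAME -> class registry that the
+    # config-driven model builder indexes to construct the dense head.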
'PointHeadSimple': PointHeadSimple, + 'PointHeadBox': PointHeadBox, + 'AnchorHeadMulti': AnchorHeadMulti, + 'CenterHead': CenterHead, + 'VoxelNeXtHead': VoxelNeXtHead, + 'TransFusionHead': TransFusionHead, +} diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_multi.py b/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_multi.py new file mode 100644 index 000000000..f562e4328 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_multi.py @@ -0,0 +1,373 @@ +import numpy as np +import torch +import torch.nn as nn + +from ..backbones_2d import BaseBEVBackbone +from .anchor_head_template import AnchorHeadTemplate + + +class SingleHead(BaseBEVBackbone): + def __init__(self, model_cfg, input_channels, num_class, num_anchors_per_location, code_size, rpn_head_cfg=None, + head_label_indices=None, separate_reg_config=None): + super().__init__(rpn_head_cfg, input_channels) + + self.num_anchors_per_location = num_anchors_per_location + self.num_class = num_class + self.code_size = code_size + self.model_cfg = model_cfg + self.separate_reg_config = separate_reg_config + self.register_buffer('head_label_indices', head_label_indices) + + if self.separate_reg_config is not None: + code_size_cnt = 0 + self.conv_box = nn.ModuleDict() + self.conv_box_names = [] + num_middle_conv = self.separate_reg_config.NUM_MIDDLE_CONV + num_middle_filter = self.separate_reg_config.NUM_MIDDLE_FILTER + conv_cls_list = [] + c_in = input_channels + for k in range(num_middle_conv): + conv_cls_list.extend([ + nn.Conv2d( + c_in, num_middle_filter, + kernel_size=3, stride=1, padding=1, bias=False + ), + nn.BatchNorm2d(num_middle_filter), + nn.ReLU() + ]) + c_in = num_middle_filter + conv_cls_list.append(nn.Conv2d( + c_in, self.num_anchors_per_location * self.num_class, + kernel_size=3, stride=1, padding=1 + )) + self.conv_cls = nn.Sequential(*conv_cls_list) + + for reg_config in self.separate_reg_config.REG_LIST: + reg_name, reg_channel = reg_config.split(':') + reg_channel = int(reg_channel) + cur_conv_list = [] + c_in = input_channels + for k in range(num_middle_conv): + cur_conv_list.extend([ + nn.Conv2d( + c_in, num_middle_filter, + kernel_size=3, stride=1, padding=1, bias=False + ), + nn.BatchNorm2d(num_middle_filter), + nn.ReLU() + ]) + c_in = num_middle_filter + + cur_conv_list.append(nn.Conv2d( + c_in, self.num_anchors_per_location * int(reg_channel), + kernel_size=3, stride=1, padding=1, bias=True + )) + code_size_cnt += reg_channel + self.conv_box[f'conv_{reg_name}'] = nn.Sequential(*cur_conv_list) + self.conv_box_names.append(f'conv_{reg_name}') + + for m in self.conv_box.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + assert code_size_cnt == code_size, f'Code size does not match: {code_size_cnt}:{code_size}' + else: + self.conv_cls = nn.Conv2d( + input_channels, self.num_anchors_per_location * self.num_class, + kernel_size=1 + ) + self.conv_box = nn.Conv2d( + input_channels, self.num_anchors_per_location * self.code_size, + kernel_size=1 + ) + + if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None: + self.conv_dir_cls = nn.Conv2d( + input_channels, + self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS, + kernel_size=1 + ) + else: + self.conv_dir_cls = None + self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False) + self.init_weights() + + def init_weights(self): + pi = 0.01 + if isinstance(self.conv_cls, nn.Conv2d): + 
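+            # Editor's note: -np.log((1 - pi) / pi) with pi = 0.01 is the
+            # focal-loss prior initialization from RetinaNet; it biases the
+            # classifier toward background so early training stays stable.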
nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi)) + else: + nn.init.constant_(self.conv_cls[-1].bias, -np.log((1 - pi) / pi)) + + def forward(self, spatial_features_2d): + ret_dict = {} + spatial_features_2d = super().forward({'spatial_features': spatial_features_2d})['spatial_features_2d'] + + cls_preds = self.conv_cls(spatial_features_2d) + + if self.separate_reg_config is None: + box_preds = self.conv_box(spatial_features_2d) + else: + box_preds_list = [] + for reg_name in self.conv_box_names: + box_preds_list.append(self.conv_box[reg_name](spatial_features_2d)) + box_preds = torch.cat(box_preds_list, dim=1) + + if not self.use_multihead: + box_preds = box_preds.permute(0, 2, 3, 1).contiguous() + cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() + else: + H, W = box_preds.shape[2:] + batch_size = box_preds.shape[0] + box_preds = box_preds.view(-1, self.num_anchors_per_location, + self.code_size, H, W).permute(0, 1, 3, 4, 2).contiguous() + cls_preds = cls_preds.view(-1, self.num_anchors_per_location, + self.num_class, H, W).permute(0, 1, 3, 4, 2).contiguous() + box_preds = box_preds.view(batch_size, -1, self.code_size) + cls_preds = cls_preds.view(batch_size, -1, self.num_class) + + if self.conv_dir_cls is not None: + dir_cls_preds = self.conv_dir_cls(spatial_features_2d) + if self.use_multihead: + dir_cls_preds = dir_cls_preds.view( + -1, self.num_anchors_per_location, self.model_cfg.NUM_DIR_BINS, H, W).permute(0, 1, 3, 4, + 2).contiguous() + dir_cls_preds = dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS) + else: + dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous() + + else: + dir_cls_preds = None + + ret_dict['cls_preds'] = cls_preds + ret_dict['box_preds'] = box_preds + ret_dict['dir_cls_preds'] = dir_cls_preds + + return ret_dict + + +class AnchorHeadMulti(AnchorHeadTemplate): + def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, + predict_boxes_when_training=True, **kwargs): + super().__init__( + model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, + point_cloud_range=point_cloud_range, predict_boxes_when_training=predict_boxes_when_training + ) + self.model_cfg = model_cfg + self.separate_multihead = self.model_cfg.get('SEPARATE_MULTIHEAD', False) + + if self.model_cfg.get('SHARED_CONV_NUM_FILTER', None) is not None: + shared_conv_num_filter = self.model_cfg.SHARED_CONV_NUM_FILTER + self.shared_conv = nn.Sequential( + nn.Conv2d(input_channels, shared_conv_num_filter, 3, stride=1, padding=1, bias=False), + nn.BatchNorm2d(shared_conv_num_filter, eps=1e-3, momentum=0.01), + nn.ReLU(), + ) + else: + self.shared_conv = None + shared_conv_num_filter = input_channels + self.rpn_heads = None + self.make_multihead(shared_conv_num_filter) + + def make_multihead(self, input_channels): + rpn_head_cfgs = self.model_cfg.RPN_HEAD_CFGS + rpn_heads = [] + class_names = [] + for rpn_head_cfg in rpn_head_cfgs: + class_names.extend(rpn_head_cfg['HEAD_CLS_NAME']) + + for rpn_head_cfg in rpn_head_cfgs: + num_anchors_per_location = sum([self.num_anchors_per_location[class_names.index(head_cls)] + for head_cls in rpn_head_cfg['HEAD_CLS_NAME']]) + head_label_indices = torch.from_numpy(np.array([ + self.class_names.index(cur_name) + 1 for cur_name in rpn_head_cfg['HEAD_CLS_NAME'] + ])) + + rpn_head = SingleHead( + self.model_cfg, input_channels, + len(rpn_head_cfg['HEAD_CLS_NAME']) if self.separate_multihead else self.num_class, + num_anchors_per_location, self.box_coder.code_size, 
rpn_head_cfg, + head_label_indices=head_label_indices, + separate_reg_config=self.model_cfg.get('SEPARATE_REG_CONFIG', None) + ) + rpn_heads.append(rpn_head) + self.rpn_heads = nn.ModuleList(rpn_heads) + + def forward(self, data_dict): + spatial_features_2d = data_dict['spatial_features_2d'] + if self.shared_conv is not None: + spatial_features_2d = self.shared_conv(spatial_features_2d) + + ret_dicts = [] + for rpn_head in self.rpn_heads: + ret_dicts.append(rpn_head(spatial_features_2d)) + + cls_preds = [ret_dict['cls_preds'] for ret_dict in ret_dicts] + box_preds = [ret_dict['box_preds'] for ret_dict in ret_dicts] + ret = { + 'cls_preds': cls_preds if self.separate_multihead else torch.cat(cls_preds, dim=1), + 'box_preds': box_preds if self.separate_multihead else torch.cat(box_preds, dim=1), + } + + if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', False): + dir_cls_preds = [ret_dict['dir_cls_preds'] for ret_dict in ret_dicts] + ret['dir_cls_preds'] = dir_cls_preds if self.separate_multihead else torch.cat(dir_cls_preds, dim=1) + + self.forward_ret_dict.update(ret) + + if self.training: + targets_dict = self.assign_targets( + gt_boxes=data_dict['gt_boxes'] + ) + self.forward_ret_dict.update(targets_dict) + + if not self.training or self.predict_boxes_when_training: + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=data_dict['batch_size'], + cls_preds=ret['cls_preds'], box_preds=ret['box_preds'], dir_cls_preds=ret.get('dir_cls_preds', None) + ) + + if isinstance(batch_cls_preds, list): + multihead_label_mapping = [] + for idx in range(len(batch_cls_preds)): + multihead_label_mapping.append(self.rpn_heads[idx].head_label_indices) + + data_dict['multihead_label_mapping'] = multihead_label_mapping + + data_dict['batch_cls_preds'] = batch_cls_preds + data_dict['batch_box_preds'] = batch_box_preds + data_dict['cls_preds_normalized'] = False + + return data_dict + + def get_cls_layer_loss(self): + loss_weights = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS + if 'pos_cls_weight' in loss_weights: + pos_cls_weight = loss_weights['pos_cls_weight'] + neg_cls_weight = loss_weights['neg_cls_weight'] + else: + pos_cls_weight = neg_cls_weight = 1.0 + + cls_preds = self.forward_ret_dict['cls_preds'] + box_cls_labels = self.forward_ret_dict['box_cls_labels'] + if not isinstance(cls_preds, list): + cls_preds = [cls_preds] + batch_size = int(cls_preds[0].shape[0]) + cared = box_cls_labels >= 0 # [N, num_anchors] + positives = box_cls_labels > 0 + negatives = box_cls_labels == 0 + negative_cls_weights = negatives * 1.0 * neg_cls_weight + + cls_weights = (negative_cls_weights + pos_cls_weight * positives).float() + + reg_weights = positives.float() + if self.num_class == 1: + # class agnostic + box_cls_labels[positives] = 1 + pos_normalizer = positives.sum(1, keepdim=True).float() + + reg_weights /= torch.clamp(pos_normalizer, min=1.0) + cls_weights /= torch.clamp(pos_normalizer, min=1.0) + cls_targets = box_cls_labels * cared.type_as(box_cls_labels) + one_hot_targets = torch.zeros( + *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds[0].dtype, device=cls_targets.device + ) + one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0) + one_hot_targets = one_hot_targets[..., 1:] + start_idx = c_idx = 0 + cls_losses = 0 + + for idx, cls_pred in enumerate(cls_preds): + cur_num_class = self.rpn_heads[idx].num_class + cls_pred = cls_pred.view(batch_size, -1, cur_num_class) + if self.separate_multihead: + one_hot_target = one_hot_targets[:, start_idx:start_idx + 
cls_pred.shape[1], + c_idx:c_idx + cur_num_class] + c_idx += cur_num_class + else: + one_hot_target = one_hot_targets[:, start_idx:start_idx + cls_pred.shape[1]] + cls_weight = cls_weights[:, start_idx:start_idx + cls_pred.shape[1]] + cls_loss_src = self.cls_loss_func(cls_pred, one_hot_target, weights=cls_weight) # [N, M] + cls_loss = cls_loss_src.sum() / batch_size + cls_loss = cls_loss * loss_weights['cls_weight'] + cls_losses += cls_loss + start_idx += cls_pred.shape[1] + assert start_idx == one_hot_targets.shape[1] + tb_dict = { + 'rpn_loss_cls': cls_losses.item() + } + return cls_losses, tb_dict + + def get_box_reg_layer_loss(self): + box_preds = self.forward_ret_dict['box_preds'] + box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None) + box_reg_targets = self.forward_ret_dict['box_reg_targets'] + box_cls_labels = self.forward_ret_dict['box_cls_labels'] + + positives = box_cls_labels > 0 + reg_weights = positives.float() + pos_normalizer = positives.sum(1, keepdim=True).float() + reg_weights /= torch.clamp(pos_normalizer, min=1.0) + + if not isinstance(box_preds, list): + box_preds = [box_preds] + batch_size = int(box_preds[0].shape[0]) + + if isinstance(self.anchors, list): + if self.use_multihead: + anchors = torch.cat( + [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) + for anchor in self.anchors], dim=0 + ) + else: + anchors = torch.cat(self.anchors, dim=-3) + else: + anchors = self.anchors + anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1) + + start_idx = 0 + box_losses = 0 + tb_dict = {} + for idx, box_pred in enumerate(box_preds): + box_pred = box_pred.view( + batch_size, -1, + box_pred.shape[-1] // self.num_anchors_per_location if not self.use_multihead else box_pred.shape[-1] + ) + box_reg_target = box_reg_targets[:, start_idx:start_idx + box_pred.shape[1]] + reg_weight = reg_weights[:, start_idx:start_idx + box_pred.shape[1]] + # sin(a - b) = sinacosb-cosasinb + if box_dir_cls_preds is not None: + box_pred_sin, reg_target_sin = self.add_sin_difference(box_pred, box_reg_target) + loc_loss_src = self.reg_loss_func(box_pred_sin, reg_target_sin, weights=reg_weight) # [N, M] + else: + loc_loss_src = self.reg_loss_func(box_pred, box_reg_target, weights=reg_weight) # [N, M] + loc_loss = loc_loss_src.sum() / batch_size + + loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + box_losses += loc_loss + tb_dict['rpn_loss_loc'] = tb_dict.get('rpn_loss_loc', 0) + loc_loss.item() + + if box_dir_cls_preds is not None: + if not isinstance(box_dir_cls_preds, list): + box_dir_cls_preds = [box_dir_cls_preds] + dir_targets = self.get_direction_target( + anchors, box_reg_targets, + dir_offset=self.model_cfg.DIR_OFFSET, + num_bins=self.model_cfg.NUM_DIR_BINS + ) + box_dir_cls_pred = box_dir_cls_preds[idx] + dir_logit = box_dir_cls_pred.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS) + weights = positives.type_as(dir_logit) + weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0) + + weight = weights[:, start_idx:start_idx + box_pred.shape[1]] + dir_target = dir_targets[:, start_idx:start_idx + box_pred.shape[1]] + dir_loss = self.dir_loss_func(dir_logit, dir_target, weights=weight) + dir_loss = dir_loss.sum() / batch_size + dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight'] + box_losses += dir_loss + tb_dict['rpn_loss_dir'] = tb_dict.get('rpn_loss_dir', 0) + dir_loss.item() + start_idx += box_pred.shape[1] + return box_losses, tb_dict diff --git 
a/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_single.py b/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_single.py new file mode 100644 index 000000000..83c62cce9 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_single.py @@ -0,0 +1,75 @@ +import numpy as np +import torch.nn as nn + +from .anchor_head_template import AnchorHeadTemplate + + +class AnchorHeadSingle(AnchorHeadTemplate): + def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, + predict_boxes_when_training=True, **kwargs): + super().__init__( + model_cfg=model_cfg, num_class=num_class, class_names=class_names, grid_size=grid_size, point_cloud_range=point_cloud_range, + predict_boxes_when_training=predict_boxes_when_training + ) + + self.num_anchors_per_location = sum(self.num_anchors_per_location) + + self.conv_cls = nn.Conv2d( + input_channels, self.num_anchors_per_location * self.num_class, + kernel_size=1 + ) + self.conv_box = nn.Conv2d( + input_channels, self.num_anchors_per_location * self.box_coder.code_size, + kernel_size=1 + ) + + if self.model_cfg.get('USE_DIRECTION_CLASSIFIER', None) is not None: + self.conv_dir_cls = nn.Conv2d( + input_channels, + self.num_anchors_per_location * self.model_cfg.NUM_DIR_BINS, + kernel_size=1 + ) + else: + self.conv_dir_cls = None + self.init_weights() + + def init_weights(self): + pi = 0.01 + nn.init.constant_(self.conv_cls.bias, -np.log((1 - pi) / pi)) + nn.init.normal_(self.conv_box.weight, mean=0, std=0.001) + + def forward(self, data_dict): + spatial_features_2d = data_dict['spatial_features_2d'] + + cls_preds = self.conv_cls(spatial_features_2d) + box_preds = self.conv_box(spatial_features_2d) + + cls_preds = cls_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C] + box_preds = box_preds.permute(0, 2, 3, 1).contiguous() # [N, H, W, C] + + self.forward_ret_dict['cls_preds'] = cls_preds + self.forward_ret_dict['box_preds'] = box_preds + + if self.conv_dir_cls is not None: + dir_cls_preds = self.conv_dir_cls(spatial_features_2d) + dir_cls_preds = dir_cls_preds.permute(0, 2, 3, 1).contiguous() + self.forward_ret_dict['dir_cls_preds'] = dir_cls_preds + else: + dir_cls_preds = None + + if self.training: + targets_dict = self.assign_targets( + gt_boxes=data_dict['gt_boxes'] + ) + self.forward_ret_dict.update(targets_dict) + + if not self.training or self.predict_boxes_when_training: + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=data_dict['batch_size'], + cls_preds=cls_preds, box_preds=box_preds, dir_cls_preds=dir_cls_preds + ) + data_dict['batch_cls_preds'] = batch_cls_preds + data_dict['batch_box_preds'] = batch_box_preds + data_dict['cls_preds_normalized'] = False + + return data_dict diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_template.py b/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_template.py new file mode 100644 index 000000000..db8167a49 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/anchor_head_template.py @@ -0,0 +1,275 @@ +import numpy as np +import torch +import torch.nn as nn + +from ...utils import box_coder_utils, common_utils, loss_utils +from .target_assigner.anchor_generator import AnchorGenerator +from .target_assigner.atss_target_assigner import ATSSTargetAssigner +from .target_assigner.axis_aligned_target_assigner import AxisAlignedTargetAssigner + + +class AnchorHeadTemplate(nn.Module): + def __init__(self, model_cfg, num_class, class_names, grid_size, point_cloud_range, 
predict_boxes_when_training): + super().__init__() + self.model_cfg = model_cfg + self.num_class = num_class + self.class_names = class_names + self.predict_boxes_when_training = predict_boxes_when_training + self.use_multihead = self.model_cfg.get('USE_MULTIHEAD', False) + + anchor_target_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG + self.box_coder = getattr(box_coder_utils, anchor_target_cfg.BOX_CODER)( + num_dir_bins=anchor_target_cfg.get('NUM_DIR_BINS', 6), + **anchor_target_cfg.get('BOX_CODER_CONFIG', {}) + ) + + anchor_generator_cfg = self.model_cfg.ANCHOR_GENERATOR_CONFIG + anchors, self.num_anchors_per_location = self.generate_anchors( + anchor_generator_cfg, grid_size=grid_size, point_cloud_range=point_cloud_range, + anchor_ndim=self.box_coder.code_size + ) + self.anchors = [x.cuda() for x in anchors] + self.target_assigner = self.get_target_assigner(anchor_target_cfg) + + self.forward_ret_dict = {} + self.build_losses(self.model_cfg.LOSS_CONFIG) + + @staticmethod + def generate_anchors(anchor_generator_cfg, grid_size, point_cloud_range, anchor_ndim=7): + anchor_generator = AnchorGenerator( + anchor_range=point_cloud_range, + anchor_generator_config=anchor_generator_cfg + ) + feature_map_size = [grid_size[:2] // config['feature_map_stride'] for config in anchor_generator_cfg] + anchors_list, num_anchors_per_location_list = anchor_generator.generate_anchors(feature_map_size) + + if anchor_ndim != 7: + for idx, anchors in enumerate(anchors_list): + pad_zeros = anchors.new_zeros([*anchors.shape[0:-1], anchor_ndim - 7]) + new_anchors = torch.cat((anchors, pad_zeros), dim=-1) + anchors_list[idx] = new_anchors + + return anchors_list, num_anchors_per_location_list + + def get_target_assigner(self, anchor_target_cfg): + if anchor_target_cfg.NAME == 'ATSS': + target_assigner = ATSSTargetAssigner( + topk=anchor_target_cfg.TOPK, + box_coder=self.box_coder, + use_multihead=self.use_multihead, + match_height=anchor_target_cfg.MATCH_HEIGHT + ) + elif anchor_target_cfg.NAME == 'AxisAlignedTargetAssigner': + target_assigner = AxisAlignedTargetAssigner( + model_cfg=self.model_cfg, + class_names=self.class_names, + box_coder=self.box_coder, + match_height=anchor_target_cfg.MATCH_HEIGHT + ) + else: + raise NotImplementedError + return target_assigner + + def build_losses(self, losses_cfg): + self.add_module( + 'cls_loss_func', + loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0) + ) + reg_loss_name = 'WeightedSmoothL1Loss' if losses_cfg.get('REG_LOSS_TYPE', None) is None \ + else losses_cfg.REG_LOSS_TYPE + self.add_module( + 'reg_loss_func', + getattr(loss_utils, reg_loss_name)(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights']) + ) + self.add_module( + 'dir_loss_func', + loss_utils.WeightedCrossEntropyLoss() + ) + + def assign_targets(self, gt_boxes): + """ + Args: + gt_boxes: (B, M, 8) + Returns: + + """ + targets_dict = self.target_assigner.assign_targets( + self.anchors, gt_boxes + ) + return targets_dict + + def get_cls_layer_loss(self): + cls_preds = self.forward_ret_dict['cls_preds'] + box_cls_labels = self.forward_ret_dict['box_cls_labels'] + batch_size = int(cls_preds.shape[0]) + cared = box_cls_labels >= 0 # [N, num_anchors] + positives = box_cls_labels > 0 + negatives = box_cls_labels == 0 + negative_cls_weights = negatives * 1.0 + cls_weights = (negative_cls_weights + 1.0 * positives).float() + reg_weights = positives.float() + if self.num_class == 1: + # class agnostic + box_cls_labels[positives] = 1 + + pos_normalizer = positives.sum(1, keepdim=True).float() + 
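+        # Editor's note: dividing by the per-sample positive-anchor count
+        # (clamped to >= 1) keeps the loss scale independent of how many
+        # anchors happen to match ground truth in each frame.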
reg_weights /= torch.clamp(pos_normalizer, min=1.0) + cls_weights /= torch.clamp(pos_normalizer, min=1.0) + cls_targets = box_cls_labels * cared.type_as(box_cls_labels) + cls_targets = cls_targets.unsqueeze(dim=-1) + + cls_targets = cls_targets.squeeze(dim=-1) + one_hot_targets = torch.zeros( + *list(cls_targets.shape), self.num_class + 1, dtype=cls_preds.dtype, device=cls_targets.device + ) + one_hot_targets.scatter_(-1, cls_targets.unsqueeze(dim=-1).long(), 1.0) + cls_preds = cls_preds.view(batch_size, -1, self.num_class) + one_hot_targets = one_hot_targets[..., 1:] + cls_loss_src = self.cls_loss_func(cls_preds, one_hot_targets, weights=cls_weights) # [N, M] + cls_loss = cls_loss_src.sum() / batch_size + + cls_loss = cls_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight'] + tb_dict = { + 'rpn_loss_cls': cls_loss.item() + } + return cls_loss, tb_dict + + @staticmethod + def add_sin_difference(boxes1, boxes2, dim=6): + assert dim != -1 + rad_pred_encoding = torch.sin(boxes1[..., dim:dim + 1]) * torch.cos(boxes2[..., dim:dim + 1]) + rad_tg_encoding = torch.cos(boxes1[..., dim:dim + 1]) * torch.sin(boxes2[..., dim:dim + 1]) + boxes1 = torch.cat([boxes1[..., :dim], rad_pred_encoding, boxes1[..., dim + 1:]], dim=-1) + boxes2 = torch.cat([boxes2[..., :dim], rad_tg_encoding, boxes2[..., dim + 1:]], dim=-1) + return boxes1, boxes2 + + @staticmethod + def get_direction_target(anchors, reg_targets, one_hot=True, dir_offset=0, num_bins=2): + batch_size = reg_targets.shape[0] + anchors = anchors.view(batch_size, -1, anchors.shape[-1]) + rot_gt = reg_targets[..., 6] + anchors[..., 6] + offset_rot = common_utils.limit_period(rot_gt - dir_offset, 0, 2 * np.pi) + dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long() + dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1) + + if one_hot: + dir_targets = torch.zeros(*list(dir_cls_targets.shape), num_bins, dtype=anchors.dtype, + device=dir_cls_targets.device) + dir_targets.scatter_(-1, dir_cls_targets.unsqueeze(dim=-1).long(), 1.0) + dir_cls_targets = dir_targets + return dir_cls_targets + + def get_box_reg_layer_loss(self): + box_preds = self.forward_ret_dict['box_preds'] + box_dir_cls_preds = self.forward_ret_dict.get('dir_cls_preds', None) + box_reg_targets = self.forward_ret_dict['box_reg_targets'] + box_cls_labels = self.forward_ret_dict['box_cls_labels'] + batch_size = int(box_preds.shape[0]) + + positives = box_cls_labels > 0 + reg_weights = positives.float() + pos_normalizer = positives.sum(1, keepdim=True).float() + reg_weights /= torch.clamp(pos_normalizer, min=1.0) + + if isinstance(self.anchors, list): + if self.use_multihead: + anchors = torch.cat( + [anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) for anchor in + self.anchors], dim=0) + else: + anchors = torch.cat(self.anchors, dim=-3) + else: + anchors = self.anchors + anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1) + box_preds = box_preds.view(batch_size, -1, + box_preds.shape[-1] // self.num_anchors_per_location if not self.use_multihead else + box_preds.shape[-1]) + # sin(a - b) = sinacosb-cosasinb + box_preds_sin, reg_targets_sin = self.add_sin_difference(box_preds, box_reg_targets) + loc_loss_src = self.reg_loss_func(box_preds_sin, reg_targets_sin, weights=reg_weights) # [N, M] + loc_loss = loc_loss_src.sum() / batch_size + + loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + box_loss = loc_loss + tb_dict = { + 'rpn_loss_loc': loc_loss.item() + } + + if 
box_dir_cls_preds is not None: + dir_targets = self.get_direction_target( + anchors, box_reg_targets, + dir_offset=self.model_cfg.DIR_OFFSET, + num_bins=self.model_cfg.NUM_DIR_BINS + ) + + dir_logits = box_dir_cls_preds.view(batch_size, -1, self.model_cfg.NUM_DIR_BINS) + weights = positives.type_as(dir_logits) + weights /= torch.clamp(weights.sum(-1, keepdim=True), min=1.0) + dir_loss = self.dir_loss_func(dir_logits, dir_targets, weights=weights) + dir_loss = dir_loss.sum() / batch_size + dir_loss = dir_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['dir_weight'] + box_loss += dir_loss + tb_dict['rpn_loss_dir'] = dir_loss.item() + + return box_loss, tb_dict + + def get_loss(self): + cls_loss, tb_dict = self.get_cls_layer_loss() + box_loss, tb_dict_box = self.get_box_reg_layer_loss() + tb_dict.update(tb_dict_box) + rpn_loss = cls_loss + box_loss + + tb_dict['rpn_loss'] = rpn_loss.item() + return rpn_loss, tb_dict + + def generate_predicted_boxes(self, batch_size, cls_preds, box_preds, dir_cls_preds=None): + """ + Args: + batch_size: + cls_preds: (N, H, W, C1) + box_preds: (N, H, W, C2) + dir_cls_preds: (N, H, W, C3) + + Returns: + batch_cls_preds: (B, num_boxes, num_classes) + batch_box_preds: (B, num_boxes, 7+C) + + """ + if isinstance(self.anchors, list): + if self.use_multihead: + anchors = torch.cat([anchor.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchor.shape[-1]) + for anchor in self.anchors], dim=0) + else: + anchors = torch.cat(self.anchors, dim=-3) + else: + anchors = self.anchors + num_anchors = anchors.view(-1, anchors.shape[-1]).shape[0] + batch_anchors = anchors.view(1, -1, anchors.shape[-1]).repeat(batch_size, 1, 1) + batch_cls_preds = cls_preds.view(batch_size, num_anchors, -1).float() \ + if not isinstance(cls_preds, list) else cls_preds + batch_box_preds = box_preds.view(batch_size, num_anchors, -1) if not isinstance(box_preds, list) \ + else torch.cat(box_preds, dim=1).view(batch_size, num_anchors, -1) + batch_box_preds = self.box_coder.decode_torch(batch_box_preds, batch_anchors) + + if dir_cls_preds is not None: + dir_offset = self.model_cfg.DIR_OFFSET + dir_limit_offset = self.model_cfg.DIR_LIMIT_OFFSET + dir_cls_preds = dir_cls_preds.view(batch_size, num_anchors, -1) if not isinstance(dir_cls_preds, list) \ + else torch.cat(dir_cls_preds, dim=1).view(batch_size, num_anchors, -1) + dir_labels = torch.max(dir_cls_preds, dim=-1)[1] + + period = (2 * np.pi / self.model_cfg.NUM_DIR_BINS) + dir_rot = common_utils.limit_period( + batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period + ) + batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype) + + if isinstance(self.box_coder, box_coder_utils.PreviousResidualDecoder): + batch_box_preds[..., 6] = common_utils.limit_period( + -(batch_box_preds[..., 6] + np.pi / 2), offset=0.5, period=np.pi * 2 + ) + + return batch_cls_preds, batch_box_preds + + def forward(self, **kwargs): + raise NotImplementedError diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/center_head.py b/toolbox/openpcdet/pcdet/models/dense_heads/center_head.py new file mode 100644 index 000000000..38a6e3536 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/center_head.py @@ -0,0 +1,416 @@ +import copy +import numpy as np +import torch +import torch.nn as nn +from torch.nn.init import kaiming_normal_ +from ..model_utils import model_nms_utils +from ..model_utils import centernet_utils +from ...utils import loss_utils +from functools import partial + + +class SeparateHead(nn.Module): + def 
__init__(self, input_channels, sep_head_dict, init_bias=-2.19, use_bias=False, norm_func=None): + super().__init__() + self.sep_head_dict = sep_head_dict + + for cur_name in self.sep_head_dict: + output_channels = self.sep_head_dict[cur_name]['out_channels'] + num_conv = self.sep_head_dict[cur_name]['num_conv'] + + fc_list = [] + for k in range(num_conv - 1): + fc_list.append(nn.Sequential( + nn.Conv2d(input_channels, input_channels, kernel_size=3, stride=1, padding=1, bias=use_bias), + nn.BatchNorm2d(input_channels) if norm_func is None else norm_func(input_channels), + nn.ReLU() + )) + fc_list.append(nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=1, padding=1, bias=True)) + fc = nn.Sequential(*fc_list) + if 'hm' in cur_name: + fc[-1].bias.data.fill_(init_bias) + else: + for m in fc.modules(): + if isinstance(m, nn.Conv2d): + kaiming_normal_(m.weight.data) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + + self.__setattr__(cur_name, fc) + + def forward(self, x): + ret_dict = {} + for cur_name in self.sep_head_dict: + ret_dict[cur_name] = self.__getattr__(cur_name)(x) + + return ret_dict + + +class CenterHead(nn.Module): + def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size, + predict_boxes_when_training=True): + super().__init__() + self.model_cfg = model_cfg + self.num_class = num_class + self.grid_size = grid_size + self.point_cloud_range = point_cloud_range + self.voxel_size = voxel_size + self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None) + + self.class_names = class_names + self.class_names_each_head = [] + self.class_id_mapping_each_head = [] + + for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD: + self.class_names_each_head.append([x for x in cur_class_names if x in class_names]) + cur_class_id_mapping = torch.from_numpy(np.array( + [self.class_names.index(x) for x in cur_class_names if x in class_names] + )).cuda() + self.class_id_mapping_each_head.append(cur_class_id_mapping) + + total_classes = sum([len(x) for x in self.class_names_each_head]) + assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}' + + norm_func = partial(nn.BatchNorm2d, eps=self.model_cfg.get('BN_EPS', 1e-5), momentum=self.model_cfg.get('BN_MOM', 0.1)) + self.shared_conv = nn.Sequential( + nn.Conv2d( + input_channels, self.model_cfg.SHARED_CONV_CHANNEL, 3, stride=1, padding=1, + bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False) + ), + norm_func(self.model_cfg.SHARED_CONV_CHANNEL), + nn.ReLU(), + ) + + self.heads_list = nn.ModuleList() + self.separate_head_cfg = self.model_cfg.SEPARATE_HEAD_CFG + for idx, cur_class_names in enumerate(self.class_names_each_head): + cur_head_dict = copy.deepcopy(self.separate_head_cfg.HEAD_DICT) + cur_head_dict['hm'] = dict(out_channels=len(cur_class_names), num_conv=self.model_cfg.NUM_HM_CONV) + self.heads_list.append( + SeparateHead( + input_channels=self.model_cfg.SHARED_CONV_CHANNEL, + sep_head_dict=cur_head_dict, + init_bias=-2.19, + use_bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False), + norm_func=norm_func + ) + ) + self.predict_boxes_when_training = predict_boxes_when_training + self.forward_ret_dict = {} + self.build_losses() + + def build_losses(self): + self.add_module('hm_loss_func', loss_utils.FocalLossCenterNet()) + self.add_module('reg_loss_func', loss_utils.RegLossCenterNet()) + + def assign_target_of_single_head( + self, num_classes, gt_boxes, 
feature_map_size, feature_map_stride, num_max_objs=500, + gaussian_overlap=0.1, min_radius=2 + ): + """ + Args: + gt_boxes: (N, 8) + feature_map_size: (2), [x, y] + + Returns: + + """ + heatmap = gt_boxes.new_zeros(num_classes, feature_map_size[1], feature_map_size[0]) + ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1)) + inds = gt_boxes.new_zeros(num_max_objs).long() + mask = gt_boxes.new_zeros(num_max_objs).long() + ret_boxes_src = gt_boxes.new_zeros(num_max_objs, gt_boxes.shape[-1]) + ret_boxes_src[:gt_boxes.shape[0]] = gt_boxes + + x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2] + coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride + coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride + coord_x = torch.clamp(coord_x, min=0, max=feature_map_size[0] - 0.5) # bugfixed: 1e-6 does not work for center.int() + coord_y = torch.clamp(coord_y, min=0, max=feature_map_size[1] - 0.5) # + center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1) + center_int = center.int() + center_int_float = center_int.float() + + dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5] + dx = dx / self.voxel_size[0] / feature_map_stride + dy = dy / self.voxel_size[1] / feature_map_stride + + radius = centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap) + radius = torch.clamp_min(radius.int(), min=min_radius) + + for k in range(min(num_max_objs, gt_boxes.shape[0])): + if dx[k] <= 0 or dy[k] <= 0: + continue + + if not (0 <= center_int[k][0] <= feature_map_size[0] and 0 <= center_int[k][1] <= feature_map_size[1]): + continue + + cur_class_id = (gt_boxes[k, -1] - 1).long() + centernet_utils.draw_gaussian_to_heatmap(heatmap[cur_class_id], center[k], radius[k].item()) + + inds[k] = center_int[k, 1] * feature_map_size[0] + center_int[k, 0] + mask[k] = 1 + + ret_boxes[k, 0:2] = center[k] - center_int_float[k].float() + ret_boxes[k, 2] = z[k] + ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log() + ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6]) + ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6]) + if gt_boxes.shape[1] > 8: + ret_boxes[k, 8:] = gt_boxes[k, 7:-1] + + return heatmap, ret_boxes, inds, mask, ret_boxes_src + + def assign_targets(self, gt_boxes, feature_map_size=None, **kwargs): + """ + Args: + gt_boxes: (B, M, 8) + range_image_polar: (B, 3, H, W) + feature_map_size: (2) [H, W] + spatial_cartesian: (B, 4, H, W) + Returns: + + """ + feature_map_size = feature_map_size[::-1] # [H, W] ==> [x, y] + target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG + # feature_map_size = self.grid_size[:2] // target_assigner_cfg.FEATURE_MAP_STRIDE + + batch_size = gt_boxes.shape[0] + ret_dict = { + 'heatmaps': [], + 'target_boxes': [], + 'inds': [], + 'masks': [], + 'heatmap_masks': [], + 'target_boxes_src': [], + } + + all_names = np.array(['bg', *self.class_names]) + for idx, cur_class_names in enumerate(self.class_names_each_head): + heatmap_list, target_boxes_list, inds_list, masks_list, target_boxes_src_list = [], [], [], [], [] + for bs_idx in range(batch_size): + cur_gt_boxes = gt_boxes[bs_idx] + gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()] + + gt_boxes_single_head = [] + + for idx, name in enumerate(gt_class_names): + if name not in cur_class_names: + continue + temp_box = cur_gt_boxes[idx] + temp_box[-1] = cur_class_names.index(name) + 1 + gt_boxes_single_head.append(temp_box[None, :]) + + if len(gt_boxes_single_head) == 0: + gt_boxes_single_head = cur_gt_boxes[:0, :] + else: + 
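+                    # Editor's note: stack the collected (1, D) boxes into an
+                    # (M_head, D) tensor; the empty branch above keeps dtype
+                    # and device via the zero-length slice cur_gt_boxes[:0, :].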
gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0) + + heatmap, ret_boxes, inds, mask, ret_boxes_src = self.assign_target_of_single_head( + num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head.cpu(), + feature_map_size=feature_map_size, feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE, + num_max_objs=target_assigner_cfg.NUM_MAX_OBJS, + gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP, + min_radius=target_assigner_cfg.MIN_RADIUS, + ) + heatmap_list.append(heatmap.to(gt_boxes_single_head.device)) + target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device)) + inds_list.append(inds.to(gt_boxes_single_head.device)) + masks_list.append(mask.to(gt_boxes_single_head.device)) + target_boxes_src_list.append(ret_boxes_src.to(gt_boxes_single_head.device)) + + ret_dict['heatmaps'].append(torch.stack(heatmap_list, dim=0)) + ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0)) + ret_dict['inds'].append(torch.stack(inds_list, dim=0)) + ret_dict['masks'].append(torch.stack(masks_list, dim=0)) + ret_dict['target_boxes_src'].append(torch.stack(target_boxes_src_list, dim=0)) + return ret_dict + + def sigmoid(self, x): + y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4) + return y + + def get_loss(self): + pred_dicts = self.forward_ret_dict['pred_dicts'] + target_dicts = self.forward_ret_dict['target_dicts'] + + tb_dict = {} + loss = 0 + + for idx, pred_dict in enumerate(pred_dicts): + pred_dict['hm'] = self.sigmoid(pred_dict['hm']) + hm_loss = self.hm_loss_func(pred_dict['hm'], target_dicts['heatmaps'][idx]) + hm_loss *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight'] + + target_boxes = target_dicts['target_boxes'][idx] + pred_boxes = torch.cat([pred_dict[head_name] for head_name in self.separate_head_cfg.HEAD_ORDER], dim=1) + + reg_loss = self.reg_loss_func( + pred_boxes, target_dicts['masks'][idx], target_dicts['inds'][idx], target_boxes + ) + loc_loss = (reg_loss * reg_loss.new_tensor(self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'])).sum() + loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + + loss += hm_loss + loc_loss + tb_dict['hm_loss_head_%d' % idx] = hm_loss.item() + tb_dict['loc_loss_head_%d' % idx] = loc_loss.item() + + if 'iou' in pred_dict or self.model_cfg.get('IOU_REG_LOSS', False): + + batch_box_preds = centernet_utils.decode_bbox_from_pred_dicts( + pred_dict=pred_dict, + point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size, + feature_map_stride=self.feature_map_stride + ) # (B, H, W, 7 or 9) + + if 'iou' in pred_dict: + batch_box_preds_for_iou = batch_box_preds.permute(0, 3, 1, 2) # (B, 7 or 9, H, W) + + iou_loss = loss_utils.calculate_iou_loss_centerhead( + iou_preds=pred_dict['iou'], + batch_box_preds=batch_box_preds_for_iou.clone().detach(), + mask=target_dicts['masks'][idx], + ind=target_dicts['inds'][idx], gt_boxes=target_dicts['target_boxes_src'][idx] + ) + loss += iou_loss + tb_dict['iou_loss_head_%d' % idx] = iou_loss.item() + + if self.model_cfg.get('IOU_REG_LOSS', False): + iou_reg_loss = loss_utils.calculate_iou_reg_loss_centerhead( + batch_box_preds=batch_box_preds_for_iou, + mask=target_dicts['masks'][idx], + ind=target_dicts['inds'][idx], gt_boxes=target_dicts['target_boxes_src'][idx] + ) + if target_dicts['masks'][idx].sum().item() != 0: + iou_reg_loss = iou_reg_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + loss += iou_reg_loss + tb_dict['iou_reg_loss_head_%d' % idx] = iou_reg_loss.item() + else: + loss += (batch_box_preds_for_iou * 
0.).sum()
+                        tb_dict['iou_reg_loss_head_%d' % idx] = (batch_box_preds_for_iou * 0.).sum().item()  # bugfixed: log a python float, not a live tensor
+
+        tb_dict['rpn_loss'] = loss.item()
+        return loss, tb_dict
+
+    def generate_predicted_boxes(self, batch_size, pred_dicts):
+        post_process_cfg = self.model_cfg.POST_PROCESSING
+        post_center_limit_range = torch.tensor(post_process_cfg.POST_CENTER_LIMIT_RANGE).cuda().float()
+
+        ret_dict = [{
+            'pred_boxes': [],
+            'pred_scores': [],
+            'pred_labels': [],
+        } for k in range(batch_size)]
+        for idx, pred_dict in enumerate(pred_dicts):
+            batch_hm = pred_dict['hm'].sigmoid()
+            batch_center = pred_dict['center']
+            batch_center_z = pred_dict['center_z']
+            batch_dim = pred_dict['dim'].exp()
+            batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1)
+            batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1)
+            batch_vel = pred_dict['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None
+
+            batch_iou = (pred_dict['iou'] + 1) * 0.5 if 'iou' in pred_dict else None
+
+            final_pred_dicts = centernet_utils.decode_bbox_from_heatmap(
+                heatmap=batch_hm, rot_cos=batch_rot_cos, rot_sin=batch_rot_sin,
+                center=batch_center, center_z=batch_center_z, dim=batch_dim, vel=batch_vel, iou=batch_iou,
+                point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size,
+                feature_map_stride=self.feature_map_stride,
+                K=post_process_cfg.MAX_OBJ_PER_SAMPLE,
+                circle_nms=(post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms'),
+                score_thresh=post_process_cfg.SCORE_THRESH,
+                post_center_limit_range=post_center_limit_range
+            )
+
+            for k, final_dict in enumerate(final_pred_dicts):
+                final_dict['pred_labels'] = self.class_id_mapping_each_head[idx][final_dict['pred_labels'].long()]
+
+                if post_process_cfg.get('USE_IOU_TO_RECTIFY_SCORE', False) and 'pred_iou' in final_dict:
+                    pred_iou = torch.clamp(final_dict['pred_iou'], min=0, max=1.0)
+                    IOU_RECTIFIER = final_dict['pred_scores'].new_tensor(post_process_cfg.IOU_RECTIFIER)
+                    final_dict['pred_scores'] = torch.pow(final_dict['pred_scores'], 1 - IOU_RECTIFIER[final_dict['pred_labels']]) * torch.pow(pred_iou, IOU_RECTIFIER[final_dict['pred_labels']])
+
+                if post_process_cfg.NMS_CONFIG.NMS_TYPE not in ['circle_nms', 'class_specific_nms']:
+                    selected, selected_scores = model_nms_utils.class_agnostic_nms(
+                        box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'],
+                        nms_config=post_process_cfg.NMS_CONFIG,
+                        score_thresh=None
+                    )
+
+                elif post_process_cfg.NMS_CONFIG.NMS_TYPE == 'class_specific_nms':
+                    selected, selected_scores = model_nms_utils.class_specific_nms(
+                        box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'],
+                        box_labels=final_dict['pred_labels'], nms_config=post_process_cfg.NMS_CONFIG,
+                        score_thresh=post_process_cfg.NMS_CONFIG.get('SCORE_THRESH', None)
+                    )
+                elif post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms':
+                    raise NotImplementedError
+
+                final_dict['pred_boxes'] = final_dict['pred_boxes'][selected]
+                final_dict['pred_scores'] = selected_scores
+                final_dict['pred_labels'] = final_dict['pred_labels'][selected]
+
+                ret_dict[k]['pred_boxes'].append(final_dict['pred_boxes'])
+                ret_dict[k]['pred_scores'].append(final_dict['pred_scores'])
+                ret_dict[k]['pred_labels'].append(final_dict['pred_labels'])
+
+        for k in range(batch_size):
+            ret_dict[k]['pred_boxes'] = torch.cat(ret_dict[k]['pred_boxes'], dim=0)
+            ret_dict[k]['pred_scores'] = torch.cat(ret_dict[k]['pred_scores'], dim=0)
+            ret_dict[k]['pred_labels'] = torch.cat(ret_dict[k]['pred_labels'], dim=0) + 1
+
+        return ret_dict
+
+    @staticmethod
+    def reorder_rois_for_refining(batch_size, pred_dicts):
+        num_max_rois = max([len(cur_dict['pred_boxes']) for cur_dict in pred_dicts])
+        num_max_rois = max(1, num_max_rois)  # keep at least one fake ROI to avoid errors downstream
+        pred_boxes = pred_dicts[0]['pred_boxes']
+
+        rois = pred_boxes.new_zeros((batch_size, num_max_rois, pred_boxes.shape[-1]))
+        roi_scores = pred_boxes.new_zeros((batch_size, num_max_rois))
+        roi_labels = pred_boxes.new_zeros((batch_size, num_max_rois)).long()
+
+        for bs_idx in range(batch_size):
+            num_boxes = len(pred_dicts[bs_idx]['pred_boxes'])
+
+            rois[bs_idx, :num_boxes, :] = pred_dicts[bs_idx]['pred_boxes']
+            roi_scores[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_scores']
+            roi_labels[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_labels']
+        return rois, roi_scores, roi_labels
+
+    def forward(self, data_dict):
+        spatial_features_2d = data_dict['spatial_features_2d']
+        x = self.shared_conv(spatial_features_2d)
+
+        pred_dicts = []
+        for head in self.heads_list:
+            pred_dicts.append(head(x))
+
+        if self.training:
+            target_dict = self.assign_targets(
+                data_dict['gt_boxes'], feature_map_size=spatial_features_2d.size()[2:],
+                feature_map_stride=data_dict.get('spatial_features_2d_strides', None)
+            )
+            self.forward_ret_dict['target_dicts'] = target_dict
+
+        self.forward_ret_dict['pred_dicts'] = pred_dicts
+
+        if not self.training or self.predict_boxes_when_training:
+            pred_dicts = self.generate_predicted_boxes(
+                data_dict['batch_size'], pred_dicts
+            )
+
+            if self.predict_boxes_when_training:
+                rois, roi_scores, roi_labels = self.reorder_rois_for_refining(data_dict['batch_size'], pred_dicts)
+                data_dict['rois'] = rois
+                data_dict['roi_scores'] = roi_scores
+                data_dict['roi_labels'] = roi_labels
+                data_dict['has_class_labels'] = True
+            else:
+                data_dict['final_box_dicts'] = pred_dicts
+
+        return data_dict
diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/point_head_box.py b/toolbox/openpcdet/pcdet/models/dense_heads/point_head_box.py
new file mode 100644
index 000000000..06930b91b
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/dense_heads/point_head_box.py
@@ -0,0 +1,115 @@
+import torch
+
+from ...utils import box_coder_utils, box_utils
+from .point_head_template import PointHeadTemplate
+
+
+class PointHeadBox(PointHeadTemplate):
+    """
+    A simple point-based segmentation head, which is used by PointRCNN.
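+    Besides the point-wise foreground scores, this head also regresses a 3D box from
+    each foreground point via self.box_layers, which provides the initial proposals
+    for the RCNN stage.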
+ Reference Paper: https://arxiv.org/abs/1812.04244 + PointRCNN: 3D Object Proposal Generation and Detection from Point Cloud + """ + def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs): + super().__init__(model_cfg=model_cfg, num_class=num_class) + self.predict_boxes_when_training = predict_boxes_when_training + self.cls_layers = self.make_fc_layers( + fc_cfg=self.model_cfg.CLS_FC, + input_channels=input_channels, + output_channels=num_class + ) + + target_cfg = self.model_cfg.TARGET_CONFIG + self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)( + **target_cfg.BOX_CODER_CONFIG + ) + self.box_layers = self.make_fc_layers( + fc_cfg=self.model_cfg.REG_FC, + input_channels=input_channels, + output_channels=self.box_coder.code_size + ) + + def assign_targets(self, input_dict): + """ + Args: + input_dict: + point_features: (N1 + N2 + N3 + ..., C) + batch_size: + point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + gt_boxes (optional): (B, M, 8) + Returns: + point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored + point_part_labels: (N1 + N2 + N3 + ..., 3) + """ + point_coords = input_dict['point_coords'] + gt_boxes = input_dict['gt_boxes'] + assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape) + assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape) + + batch_size = gt_boxes.shape[0] + extend_gt_boxes = box_utils.enlarge_box3d( + gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH + ).view(batch_size, -1, gt_boxes.shape[-1]) + targets_dict = self.assign_stack_targets( + points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes, + set_ignore_flag=True, use_ball_constraint=False, + ret_part_labels=False, ret_box_labels=True + ) + + return targets_dict + + def get_loss(self, tb_dict=None): + tb_dict = {} if tb_dict is None else tb_dict + point_loss_cls, tb_dict_1 = self.get_cls_layer_loss() + point_loss_box, tb_dict_2 = self.get_box_layer_loss() + + point_loss = point_loss_cls + point_loss_box + tb_dict.update(tb_dict_1) + tb_dict.update(tb_dict_2) + return point_loss, tb_dict + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + point_features: (N1 + N2 + N3 + ..., C) or (B, N, C) + point_features_before_fusion: (N1 + N2 + N3 + ..., C) + point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + point_labels (optional): (N1 + N2 + N3 + ...) 
+                gt_boxes (optional): (B, M, 8)
+        Returns:
+            batch_dict:
+                point_cls_scores: (N1 + N2 + N3 + ..., 1)
+                point_part_offset: (N1 + N2 + N3 + ..., 3)
+        """
+        if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False):
+            point_features = batch_dict['point_features_before_fusion']
+        else:
+            point_features = batch_dict['point_features']
+        point_cls_preds = self.cls_layers(point_features)  # (total_points, num_class)
+        point_box_preds = self.box_layers(point_features)  # (total_points, box_code_size)
+
+        point_cls_preds_max, _ = point_cls_preds.max(dim=-1)
+        batch_dict['point_cls_scores'] = torch.sigmoid(point_cls_preds_max)
+
+        ret_dict = {'point_cls_preds': point_cls_preds,
+                    'point_box_preds': point_box_preds}
+        if self.training:
+            targets_dict = self.assign_targets(batch_dict)
+            ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']
+            ret_dict['point_box_labels'] = targets_dict['point_box_labels']
+
+        if not self.training or self.predict_boxes_when_training:
+            point_cls_preds, point_box_preds = self.generate_predicted_boxes(
+                points=batch_dict['point_coords'][:, 1:4],
+                point_cls_preds=point_cls_preds, point_box_preds=point_box_preds
+            )
+            batch_dict['batch_cls_preds'] = point_cls_preds
+            batch_dict['batch_box_preds'] = point_box_preds
+            batch_dict['batch_index'] = batch_dict['point_coords'][:, 0]
+            batch_dict['cls_preds_normalized'] = False
+
+        self.forward_ret_dict = ret_dict
+
+        return batch_dict
diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/point_head_simple.py b/toolbox/openpcdet/pcdet/models/dense_heads/point_head_simple.py
new file mode 100644
index 000000000..1be84153d
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/dense_heads/point_head_simple.py
@@ -0,0 +1,91 @@
+import torch
+
+from ...utils import box_utils
+from .point_head_template import PointHeadTemplate
+
+
+class PointHeadSimple(PointHeadTemplate):
+    """
+    A simple point-based segmentation head, which is used for PV-RCNN keypoint segmentation.
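+    In contrast to PointHeadBox, only a point-wise classification branch is built,
+    since PV-RCNN only needs foreground scores to weight its sampled keypoints.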
+ Reference Paper: https://arxiv.org/abs/1912.13192 + PV-RCNN: Point-Voxel Feature Set Abstraction for 3D Object Detection + """ + def __init__(self, num_class, input_channels, model_cfg, **kwargs): + super().__init__(model_cfg=model_cfg, num_class=num_class) + self.cls_layers = self.make_fc_layers( + fc_cfg=self.model_cfg.CLS_FC, + input_channels=input_channels, + output_channels=num_class + ) + + def assign_targets(self, input_dict): + """ + Args: + input_dict: + point_features: (N1 + N2 + N3 + ..., C) + batch_size: + point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + gt_boxes (optional): (B, M, 8) + Returns: + point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored + point_part_labels: (N1 + N2 + N3 + ..., 3) + """ + point_coords = input_dict['point_coords'] + gt_boxes = input_dict['gt_boxes'] + assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape) + assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape) + + batch_size = gt_boxes.shape[0] + extend_gt_boxes = box_utils.enlarge_box3d( + gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH + ).view(batch_size, -1, gt_boxes.shape[-1]) + targets_dict = self.assign_stack_targets( + points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes, + set_ignore_flag=True, use_ball_constraint=False, + ret_part_labels=False + ) + + return targets_dict + + def get_loss(self, tb_dict=None): + tb_dict = {} if tb_dict is None else tb_dict + point_loss_cls, tb_dict_1 = self.get_cls_layer_loss() + + point_loss = point_loss_cls + tb_dict.update(tb_dict_1) + return point_loss, tb_dict + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + point_features: (N1 + N2 + N3 + ..., C) or (B, N, C) + point_features_before_fusion: (N1 + N2 + N3 + ..., C) + point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + point_labels (optional): (N1 + N2 + N3 + ...) 
+ gt_boxes (optional): (B, M, 8) + Returns: + batch_dict: + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + """ + if self.model_cfg.get('USE_POINT_FEATURES_BEFORE_FUSION', False): + point_features = batch_dict['point_features_before_fusion'] + else: + point_features = batch_dict['point_features'] + point_cls_preds = self.cls_layers(point_features) # (total_points, num_class) + + ret_dict = { + 'point_cls_preds': point_cls_preds, + } + + point_cls_scores = torch.sigmoid(point_cls_preds) + batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1) + + if self.training: + targets_dict = self.assign_targets(batch_dict) + ret_dict['point_cls_labels'] = targets_dict['point_cls_labels'] + self.forward_ret_dict = ret_dict + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/point_head_template.py b/toolbox/openpcdet/pcdet/models/dense_heads/point_head_template.py new file mode 100644 index 000000000..9ea0af0db --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/point_head_template.py @@ -0,0 +1,210 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils import common_utils, loss_utils + + +class PointHeadTemplate(nn.Module): + def __init__(self, model_cfg, num_class): + super().__init__() + self.model_cfg = model_cfg + self.num_class = num_class + + self.build_losses(self.model_cfg.LOSS_CONFIG) + self.forward_ret_dict = None + + def build_losses(self, losses_cfg): + self.add_module( + 'cls_loss_func', + loss_utils.SigmoidFocalClassificationLoss(alpha=0.25, gamma=2.0) + ) + reg_loss_type = losses_cfg.get('LOSS_REG', None) + if reg_loss_type == 'smooth-l1': + self.reg_loss_func = F.smooth_l1_loss + elif reg_loss_type == 'l1': + self.reg_loss_func = F.l1_loss + elif reg_loss_type == 'WeightedSmoothL1Loss': + self.reg_loss_func = loss_utils.WeightedSmoothL1Loss( + code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None) + ) + else: + self.reg_loss_func = F.smooth_l1_loss + + @staticmethod + def make_fc_layers(fc_cfg, input_channels, output_channels): + fc_layers = [] + c_in = input_channels + for k in range(0, fc_cfg.__len__()): + fc_layers.extend([ + nn.Linear(c_in, fc_cfg[k], bias=False), + nn.BatchNorm1d(fc_cfg[k]), + nn.ReLU(), + ]) + c_in = fc_cfg[k] + fc_layers.append(nn.Linear(c_in, output_channels, bias=True)) + return nn.Sequential(*fc_layers) + + def assign_stack_targets(self, points, gt_boxes, extend_gt_boxes=None, + ret_box_labels=False, ret_part_labels=False, + set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0): + """ + Args: + points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + gt_boxes: (B, M, 8) + extend_gt_boxes: [B, M, 8] + ret_box_labels: + ret_part_labels: + set_ignore_flag: + use_ball_constraint: + central_radius: + + Returns: + point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored + point_box_labels: (N1 + N2 + N3 + ..., code_size) + + """ + assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape) + assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape) + assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3 and extend_gt_boxes.shape[2] == 8, \ + 'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape) + assert set_ignore_flag != use_ball_constraint, 'Choose one only!' 
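+        # The two mutually exclusive modes below control how foreground points are selected:
+        # * set_ignore_flag=True: points falling inside an enlarged gt box but outside the
+        #   original box get label -1 and are ignored by the classification loss;
+        # * use_ball_constraint=True: a point counts as foreground only if it lies inside a
+        #   gt box and within central_radius of that box's center.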
+ batch_size = gt_boxes.shape[0] + bs_idx = points[:, 0] + point_cls_labels = points.new_zeros(points.shape[0]).long() + point_box_labels = gt_boxes.new_zeros((points.shape[0], 8)) if ret_box_labels else None + point_part_labels = gt_boxes.new_zeros((points.shape[0], 3)) if ret_part_labels else None + for k in range(batch_size): + bs_mask = (bs_idx == k) + points_single = points[bs_mask][:, 1:4] + point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum()) + box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu( + points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous() + ).long().squeeze(dim=0) + box_fg_flag = (box_idxs_of_pts >= 0) + if set_ignore_flag: + extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu( + points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous() + ).long().squeeze(dim=0) + fg_flag = box_fg_flag + ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0) + point_cls_labels_single[ignore_flag] = -1 + elif use_ball_constraint: + box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone() + box_centers[:, 2] += gt_boxes[k][box_idxs_of_pts][:, 5] / 2 + ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius) + fg_flag = box_fg_flag & ball_flag + else: + raise NotImplementedError + + gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]] + point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long() + point_cls_labels[bs_mask] = point_cls_labels_single + + if ret_box_labels and gt_box_of_fg_points.shape[0] > 0: + point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 8)) + fg_point_box_labels = self.box_coder.encode_torch( + gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag], + gt_classes=gt_box_of_fg_points[:, -1].long() + ) + point_box_labels_single[fg_flag] = fg_point_box_labels + point_box_labels[bs_mask] = point_box_labels_single + + if ret_part_labels: + point_part_labels_single = point_part_labels.new_zeros((bs_mask.sum(), 3)) + transformed_points = points_single[fg_flag] - gt_box_of_fg_points[:, 0:3] + transformed_points = common_utils.rotate_points_along_z( + transformed_points.view(-1, 1, 3), -gt_box_of_fg_points[:, 6] + ).view(-1, 3) + offset = torch.tensor([0.5, 0.5, 0.5]).view(1, 3).type_as(transformed_points) + point_part_labels_single[fg_flag] = (transformed_points / gt_box_of_fg_points[:, 3:6]) + offset + point_part_labels[bs_mask] = point_part_labels_single + + targets_dict = { + 'point_cls_labels': point_cls_labels, + 'point_box_labels': point_box_labels, + 'point_part_labels': point_part_labels + } + return targets_dict + + def get_cls_layer_loss(self, tb_dict=None): + point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1) + point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class) + + positives = (point_cls_labels > 0) + negative_cls_weights = (point_cls_labels == 0) * 1.0 + cls_weights = (negative_cls_weights + 1.0 * positives).float() + pos_normalizer = positives.sum(dim=0).float() + cls_weights /= torch.clamp(pos_normalizer, min=1.0) + + one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1) + one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0) + one_hot_targets = one_hot_targets[..., 1:] + cls_loss_src = self.cls_loss_func(point_cls_preds, one_hot_targets, weights=cls_weights) + point_loss_cls = cls_loss_src.sum() + + loss_weights_dict = 
self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS + point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight'] + if tb_dict is None: + tb_dict = {} + tb_dict.update({ + 'point_loss_cls': point_loss_cls.item(), + 'point_pos_num': pos_normalizer.item() + }) + return point_loss_cls, tb_dict + + def get_part_layer_loss(self, tb_dict=None): + pos_mask = self.forward_ret_dict['point_cls_labels'] > 0 + pos_normalizer = max(1, (pos_mask > 0).sum().item()) + point_part_labels = self.forward_ret_dict['point_part_labels'] + point_part_preds = self.forward_ret_dict['point_part_preds'] + point_loss_part = F.binary_cross_entropy(torch.sigmoid(point_part_preds), point_part_labels, reduction='none') + point_loss_part = (point_loss_part.sum(dim=-1) * pos_mask.float()).sum() / (3 * pos_normalizer) + + loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS + point_loss_part = point_loss_part * loss_weights_dict['point_part_weight'] + if tb_dict is None: + tb_dict = {} + tb_dict.update({'point_loss_part': point_loss_part.item()}) + return point_loss_part, tb_dict + + def get_box_layer_loss(self, tb_dict=None): + pos_mask = self.forward_ret_dict['point_cls_labels'] > 0 + point_box_labels = self.forward_ret_dict['point_box_labels'] + point_box_preds = self.forward_ret_dict['point_box_preds'] + + reg_weights = pos_mask.float() + pos_normalizer = pos_mask.sum().float() + reg_weights /= torch.clamp(pos_normalizer, min=1.0) + + point_loss_box_src = self.reg_loss_func( + point_box_preds[None, ...], point_box_labels[None, ...], weights=reg_weights[None, ...] + ) + point_loss_box = point_loss_box_src.sum() + + loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS + point_loss_box = point_loss_box * loss_weights_dict['point_box_weight'] + if tb_dict is None: + tb_dict = {} + tb_dict.update({'point_loss_box': point_loss_box.item()}) + return point_loss_box, tb_dict + + def generate_predicted_boxes(self, points, point_cls_preds, point_box_preds): + """ + Args: + points: (N, 3) + point_cls_preds: (N, num_class) + point_box_preds: (N, box_code_size) + Returns: + point_cls_preds: (N, num_class) + point_box_preds: (N, box_code_size) + + """ + _, pred_classes = point_cls_preds.max(dim=-1) + point_box_preds = self.box_coder.decode_torch(point_box_preds, points, pred_classes + 1) + + return point_cls_preds, point_box_preds + + def forward(self, **kwargs): + raise NotImplementedError diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/point_intra_part_head.py b/toolbox/openpcdet/pcdet/models/dense_heads/point_intra_part_head.py new file mode 100644 index 000000000..794365e35 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/point_intra_part_head.py @@ -0,0 +1,127 @@ +import torch + +from ...utils import box_coder_utils, box_utils +from .point_head_template import PointHeadTemplate + + +class PointIntraPartOffsetHead(PointHeadTemplate): + """ + Point-based head for predicting the intra-object part locations. 
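+    A part location is the point's position inside its object box, expressed in the
+    box's canonical frame and normalized to [0, 1] along each axis (see
+    PointHeadTemplate.assign_stack_targets with ret_part_labels=True).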
+ Reference Paper: https://arxiv.org/abs/1907.03670 + From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network + """ + def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs): + super().__init__(model_cfg=model_cfg, num_class=num_class) + self.predict_boxes_when_training = predict_boxes_when_training + self.cls_layers = self.make_fc_layers( + fc_cfg=self.model_cfg.CLS_FC, + input_channels=input_channels, + output_channels=num_class + ) + self.part_reg_layers = self.make_fc_layers( + fc_cfg=self.model_cfg.PART_FC, + input_channels=input_channels, + output_channels=3 + ) + target_cfg = self.model_cfg.TARGET_CONFIG + if target_cfg.get('BOX_CODER', None) is not None: + self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)( + **target_cfg.BOX_CODER_CONFIG + ) + self.box_layers = self.make_fc_layers( + fc_cfg=self.model_cfg.REG_FC, + input_channels=input_channels, + output_channels=self.box_coder.code_size + ) + else: + self.box_layers = None + + def assign_targets(self, input_dict): + """ + Args: + input_dict: + point_features: (N1 + N2 + N3 + ..., C) + batch_size: + point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + gt_boxes (optional): (B, M, 8) + Returns: + point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored + point_part_labels: (N1 + N2 + N3 + ..., 3) + """ + point_coords = input_dict['point_coords'] + gt_boxes = input_dict['gt_boxes'] + assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape) + assert point_coords.shape.__len__() in [2], 'points.shape=%s' % str(point_coords.shape) + + batch_size = gt_boxes.shape[0] + extend_gt_boxes = box_utils.enlarge_box3d( + gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=self.model_cfg.TARGET_CONFIG.GT_EXTRA_WIDTH + ).view(batch_size, -1, gt_boxes.shape[-1]) + targets_dict = self.assign_stack_targets( + points=point_coords, gt_boxes=gt_boxes, extend_gt_boxes=extend_gt_boxes, + set_ignore_flag=True, use_ball_constraint=False, + ret_part_labels=True, ret_box_labels=(self.box_layers is not None) + ) + + return targets_dict + + def get_loss(self, tb_dict=None): + tb_dict = {} if tb_dict is None else tb_dict + point_loss_cls, tb_dict = self.get_cls_layer_loss(tb_dict) + point_loss_part, tb_dict = self.get_part_layer_loss(tb_dict) + point_loss = point_loss_cls + point_loss_part + + if self.box_layers is not None: + point_loss_box, tb_dict = self.get_box_layer_loss(tb_dict) + point_loss += point_loss_box + return point_loss, tb_dict + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + point_features: (N1 + N2 + N3 + ..., C) or (B, N, C) + point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z] + point_labels (optional): (N1 + N2 + N3 + ...) 
+ gt_boxes (optional): (B, M, 8) + Returns: + batch_dict: + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + """ + point_features = batch_dict['point_features'] + point_cls_preds = self.cls_layers(point_features) # (total_points, num_class) + point_part_preds = self.part_reg_layers(point_features) + + ret_dict = { + 'point_cls_preds': point_cls_preds, + 'point_part_preds': point_part_preds, + } + if self.box_layers is not None: + point_box_preds = self.box_layers(point_features) + ret_dict['point_box_preds'] = point_box_preds + + point_cls_scores = torch.sigmoid(point_cls_preds) + point_part_offset = torch.sigmoid(point_part_preds) + batch_dict['point_cls_scores'], _ = point_cls_scores.max(dim=-1) + batch_dict['point_part_offset'] = point_part_offset + + if self.training: + targets_dict = self.assign_targets(batch_dict) + ret_dict['point_cls_labels'] = targets_dict['point_cls_labels'] + ret_dict['point_part_labels'] = targets_dict.get('point_part_labels') + ret_dict['point_box_labels'] = targets_dict.get('point_box_labels') + + if self.box_layers is not None and (not self.training or self.predict_boxes_when_training): + point_cls_preds, point_box_preds = self.generate_predicted_boxes( + points=batch_dict['point_coords'][:, 1:4], + point_cls_preds=point_cls_preds, point_box_preds=ret_dict['point_box_preds'] + ) + batch_dict['batch_cls_preds'] = point_cls_preds + batch_dict['batch_box_preds'] = point_box_preds + batch_dict['batch_index'] = batch_dict['point_coords'][:, 0] + batch_dict['cls_preds_normalized'] = False + + self.forward_ret_dict = ret_dict + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/__init__.py b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/anchor_generator.py b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/anchor_generator.py new file mode 100644 index 000000000..0aa686110 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/anchor_generator.py @@ -0,0 +1,79 @@ +import torch + + +class AnchorGenerator(object): + def __init__(self, anchor_range, anchor_generator_config): + super().__init__() + self.anchor_generator_cfg = anchor_generator_config + self.anchor_range = anchor_range + self.anchor_sizes = [config['anchor_sizes'] for config in anchor_generator_config] + self.anchor_rotations = [config['anchor_rotations'] for config in anchor_generator_config] + self.anchor_heights = [config['anchor_bottom_heights'] for config in anchor_generator_config] + self.align_center = [config.get('align_center', False) for config in anchor_generator_config] + + assert len(self.anchor_sizes) == len(self.anchor_rotations) == len(self.anchor_heights) + self.num_of_anchor_sets = len(self.anchor_sizes) + + def generate_anchors(self, grid_sizes): + assert len(grid_sizes) == self.num_of_anchor_sets + all_anchors = [] + num_anchors_per_location = [] + for grid_size, anchor_size, anchor_rotation, anchor_height, align_center in zip( + grid_sizes, self.anchor_sizes, self.anchor_rotations, self.anchor_heights, self.align_center): + + num_anchors_per_location.append(len(anchor_rotation) * len(anchor_size) * len(anchor_height)) + if align_center: + x_stride = (self.anchor_range[3] - self.anchor_range[0]) / grid_size[0] + y_stride = (self.anchor_range[4] - self.anchor_range[1]) / grid_size[1] + x_offset, y_offset = x_stride / 2, y_stride / 2 
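+            # align_center=True places anchors at the centers of the BEV grid cells;
+            # the else-branch below places them on the (grid_size - 1) cell corner
+            # points, i.e. with no half-stride offset.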
+ else: + x_stride = (self.anchor_range[3] - self.anchor_range[0]) / (grid_size[0] - 1) + y_stride = (self.anchor_range[4] - self.anchor_range[1]) / (grid_size[1] - 1) + x_offset, y_offset = 0, 0 + + x_shifts = torch.arange( + self.anchor_range[0] + x_offset, self.anchor_range[3] + 1e-5, step=x_stride, dtype=torch.float32, + ).cuda() + y_shifts = torch.arange( + self.anchor_range[1] + y_offset, self.anchor_range[4] + 1e-5, step=y_stride, dtype=torch.float32, + ).cuda() + z_shifts = x_shifts.new_tensor(anchor_height) + + num_anchor_size, num_anchor_rotation = anchor_size.__len__(), anchor_rotation.__len__() + anchor_rotation = x_shifts.new_tensor(anchor_rotation) + anchor_size = x_shifts.new_tensor(anchor_size) + x_shifts, y_shifts, z_shifts = torch.meshgrid([ + x_shifts, y_shifts, z_shifts + ]) # [x_grid, y_grid, z_grid] + anchors = torch.stack((x_shifts, y_shifts, z_shifts), dim=-1) # [x, y, z, 3] + anchors = anchors[:, :, :, None, :].repeat(1, 1, 1, anchor_size.shape[0], 1) + anchor_size = anchor_size.view(1, 1, 1, -1, 3).repeat([*anchors.shape[0:3], 1, 1]) + anchors = torch.cat((anchors, anchor_size), dim=-1) + anchors = anchors[:, :, :, :, None, :].repeat(1, 1, 1, 1, num_anchor_rotation, 1) + anchor_rotation = anchor_rotation.view(1, 1, 1, 1, -1, 1).repeat([*anchors.shape[0:3], num_anchor_size, 1, 1]) + anchors = torch.cat((anchors, anchor_rotation), dim=-1) # [x, y, z, num_size, num_rot, 7] + + anchors = anchors.permute(2, 1, 0, 3, 4, 5).contiguous() + #anchors = anchors.view(-1, anchors.shape[-1]) + anchors[..., 2] += anchors[..., 5] / 2 # shift to box centers + all_anchors.append(anchors) + return all_anchors, num_anchors_per_location + + +if __name__ == '__main__': + from easydict import EasyDict + config = [ + EasyDict({ + 'anchor_sizes': [[2.1, 4.7, 1.7], [0.86, 0.91, 1.73], [0.84, 1.78, 1.78]], + 'anchor_rotations': [0, 1.57], + 'anchor_heights': [0, 0.5] + }) + ] + + A = AnchorGenerator( + anchor_range=[-75.2, -75.2, -2, 75.2, 75.2, 4], + anchor_generator_config=config + ) + import pdb + pdb.set_trace() + A.generate_anchors([[188, 188]]) diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py new file mode 100644 index 000000000..112af2cf7 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/atss_target_assigner.py @@ -0,0 +1,141 @@ +import torch + +from ....ops.iou3d_nms import iou3d_nms_utils +from ....utils import common_utils + + +class ATSSTargetAssigner(object): + """ + Reference: https://arxiv.org/abs/1912.02424 + """ + def __init__(self, topk, box_coder, match_height=False): + self.topk = topk + self.box_coder = box_coder + self.match_height = match_height + + def assign_targets(self, anchors_list, gt_boxes_with_classes, use_multihead=False): + """ + Args: + anchors: [(N, 7), ...] 
+ gt_boxes: (B, M, 8) + Returns: + + """ + if not isinstance(anchors_list, list): + anchors_list = [anchors_list] + single_set_of_anchor = True + else: + single_set_of_anchor = len(anchors_list) == 1 + cls_labels_list, reg_targets_list, reg_weights_list = [], [], [] + for anchors in anchors_list: + batch_size = gt_boxes_with_classes.shape[0] + gt_classes = gt_boxes_with_classes[:, :, -1] + gt_boxes = gt_boxes_with_classes[:, :, :-1] + if use_multihead: + anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1]) + else: + anchors = anchors.view(-1, anchors.shape[-1]) + cls_labels, reg_targets, reg_weights = [], [], [] + for k in range(batch_size): + cur_gt = gt_boxes[k] + cnt = cur_gt.__len__() - 1 + while cnt > 0 and cur_gt[cnt].sum() == 0: + cnt -= 1 + cur_gt = cur_gt[:cnt + 1] + + cur_gt_classes = gt_classes[k][:cnt + 1] + cur_cls_labels, cur_reg_targets, cur_reg_weights = self.assign_targets_single( + anchors, cur_gt, cur_gt_classes + ) + cls_labels.append(cur_cls_labels) + reg_targets.append(cur_reg_targets) + reg_weights.append(cur_reg_weights) + + cls_labels = torch.stack(cls_labels, dim=0) + reg_targets = torch.stack(reg_targets, dim=0) + reg_weights = torch.stack(reg_weights, dim=0) + cls_labels_list.append(cls_labels) + reg_targets_list.append(reg_targets) + reg_weights_list.append(reg_weights) + + if single_set_of_anchor: + ret_dict = { + 'box_cls_labels': cls_labels_list[0], + 'box_reg_targets': reg_targets_list[0], + 'reg_weights': reg_weights_list[0] + } + else: + ret_dict = { + 'box_cls_labels': torch.cat(cls_labels_list, dim=1), + 'box_reg_targets': torch.cat(reg_targets_list, dim=1), + 'reg_weights': torch.cat(reg_weights_list, dim=1) + } + return ret_dict + + def assign_targets_single(self, anchors, gt_boxes, gt_classes): + """ + Args: + anchors: (N, 7) [x, y, z, dx, dy, dz, heading] + gt_boxes: (M, 7) [x, y, z, dx, dy, dz, heading] + gt_classes: (M) + Returns: + + """ + num_anchor = anchors.shape[0] + num_gt = gt_boxes.shape[0] + + # select topk anchors for each gt_boxes + if self.match_height: + ious = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7]) # (N, M) + else: + ious = iou3d_nms_utils.boxes_iou_bev(anchors[:, 0:7], gt_boxes[:, 0:7]) + + distance = (anchors[:, None, 0:3] - gt_boxes[None, :, 0:3]).norm(dim=-1) # (N, M) + _, topk_idxs = distance.topk(self.topk, dim=0, largest=False) # (K, M) + candidate_ious = ious[topk_idxs, torch.arange(num_gt)] # (K, M) + iou_mean_per_gt = candidate_ious.mean(dim=0) + iou_std_per_gt = candidate_ious.std(dim=0) + iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt + 1e-6 + is_pos = candidate_ious >= iou_thresh_per_gt[None, :] # (K, M) + + # check whether anchor_center in gt_boxes, only check BEV x-y axes + candidate_anchors = anchors[topk_idxs.view(-1)] # (KxM, 7) + gt_boxes_of_each_anchor = gt_boxes[:, :].repeat(self.topk, 1) # (KxM, 7) + xyz_local = candidate_anchors[:, 0:3] - gt_boxes_of_each_anchor[:, 0:3] + xyz_local = common_utils.rotate_points_along_z( + xyz_local[:, None, :], -gt_boxes_of_each_anchor[:, 6] + ).squeeze(dim=1) + xy_local = xyz_local[:, 0:2] + lw = gt_boxes_of_each_anchor[:, 3:5][:, [1, 0]] # bugfixed: w ==> y, l ==> x in local coords + is_in_gt = ((xy_local <= lw / 2) & (xy_local >= -lw / 2)).all(dim=-1).view(-1, num_gt) # (K, M) + is_pos = is_pos & is_in_gt # (K, M) + + for ng in range(num_gt): + topk_idxs[:, ng] += ng * num_anchor + + # select the highest IoU if an anchor box is assigned with multiple gt_boxes + INF = -0x7FFFFFFF + ious_inf = 
torch.full_like(ious, INF).t().contiguous().view(-1) # (MxN) + index = topk_idxs.view(-1)[is_pos.view(-1)] + ious_inf[index] = ious.t().contiguous().view(-1)[index] + ious_inf = ious_inf.view(num_gt, -1).t() # (N, M) + + anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1) + + # match the gt_boxes to the anchors which have maximum iou with them + max_iou_of_each_gt, argmax_iou_of_each_gt = ious.max(dim=0) + anchors_to_gt_indexs[argmax_iou_of_each_gt] = torch.arange(0, num_gt, device=ious.device) + anchors_to_gt_values[argmax_iou_of_each_gt] = max_iou_of_each_gt + + cls_labels = gt_classes[anchors_to_gt_indexs] + cls_labels[anchors_to_gt_values == INF] = 0 + matched_gts = gt_boxes[anchors_to_gt_indexs] + + pos_mask = cls_labels > 0 + reg_targets = matched_gts.new_zeros((num_anchor, self.box_coder.code_size)) + reg_weights = matched_gts.new_zeros(num_anchor) + if pos_mask.sum() > 0: + reg_targets[pos_mask > 0] = self.box_coder.encode_torch(matched_gts[pos_mask > 0], anchors[pos_mask > 0]) + reg_weights[pos_mask] = 1.0 + + return cls_labels, reg_targets, reg_weights diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py new file mode 100644 index 000000000..4dcd6e93c --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/axis_aligned_target_assigner.py @@ -0,0 +1,210 @@ +import numpy as np +import torch + +from ....ops.iou3d_nms import iou3d_nms_utils +from ....utils import box_utils + + +class AxisAlignedTargetAssigner(object): + def __init__(self, model_cfg, class_names, box_coder, match_height=False): + super().__init__() + + anchor_generator_cfg = model_cfg.ANCHOR_GENERATOR_CONFIG + anchor_target_cfg = model_cfg.TARGET_ASSIGNER_CONFIG + self.box_coder = box_coder + self.match_height = match_height + self.class_names = np.array(class_names) + self.anchor_class_names = [config['class_name'] for config in anchor_generator_cfg] + self.pos_fraction = anchor_target_cfg.POS_FRACTION if anchor_target_cfg.POS_FRACTION >= 0 else None + self.sample_size = anchor_target_cfg.SAMPLE_SIZE + self.norm_by_num_examples = anchor_target_cfg.NORM_BY_NUM_EXAMPLES + self.matched_thresholds = {} + self.unmatched_thresholds = {} + for config in anchor_generator_cfg: + self.matched_thresholds[config['class_name']] = config['matched_threshold'] + self.unmatched_thresholds[config['class_name']] = config['unmatched_threshold'] + + self.use_multihead = model_cfg.get('USE_MULTIHEAD', False) + # self.separate_multihead = model_cfg.get('SEPARATE_MULTIHEAD', False) + # if self.seperate_multihead: + # rpn_head_cfgs = model_cfg.RPN_HEAD_CFGS + # self.gt_remapping = {} + # for rpn_head_cfg in rpn_head_cfgs: + # for idx, name in enumerate(rpn_head_cfg['HEAD_CLS_NAME']): + # self.gt_remapping[name] = idx + 1 + + def assign_targets(self, all_anchors, gt_boxes_with_classes): + """ + Args: + all_anchors: [(N, 7), ...] 
+ gt_boxes: (B, M, 8) + Returns: + + """ + + bbox_targets = [] + cls_labels = [] + reg_weights = [] + + batch_size = gt_boxes_with_classes.shape[0] + gt_classes = gt_boxes_with_classes[:, :, -1] + gt_boxes = gt_boxes_with_classes[:, :, :-1] + for k in range(batch_size): + cur_gt = gt_boxes[k] + cnt = cur_gt.__len__() - 1 + while cnt > 0 and cur_gt[cnt].sum() == 0: + cnt -= 1 + cur_gt = cur_gt[:cnt + 1] + cur_gt_classes = gt_classes[k][:cnt + 1].int() + + target_list = [] + for anchor_class_name, anchors in zip(self.anchor_class_names, all_anchors): + if cur_gt_classes.shape[0] > 1: + mask = torch.from_numpy(self.class_names[cur_gt_classes.cpu() - 1] == anchor_class_name) + else: + mask = torch.tensor([self.class_names[c - 1] == anchor_class_name + for c in cur_gt_classes], dtype=torch.bool) + + if self.use_multihead: + anchors = anchors.permute(3, 4, 0, 1, 2, 5).contiguous().view(-1, anchors.shape[-1]) + # if self.seperate_multihead: + # selected_classes = cur_gt_classes[mask].clone() + # if len(selected_classes) > 0: + # new_cls_id = self.gt_remapping[anchor_class_name] + # selected_classes[:] = new_cls_id + # else: + # selected_classes = cur_gt_classes[mask] + selected_classes = cur_gt_classes[mask] + else: + feature_map_size = anchors.shape[:3] + anchors = anchors.view(-1, anchors.shape[-1]) + selected_classes = cur_gt_classes[mask] + + single_target = self.assign_targets_single( + anchors, + cur_gt[mask], + gt_classes=selected_classes, + matched_threshold=self.matched_thresholds[anchor_class_name], + unmatched_threshold=self.unmatched_thresholds[anchor_class_name] + ) + target_list.append(single_target) + + if self.use_multihead: + target_dict = { + 'box_cls_labels': [t['box_cls_labels'].view(-1) for t in target_list], + 'box_reg_targets': [t['box_reg_targets'].view(-1, self.box_coder.code_size) for t in target_list], + 'reg_weights': [t['reg_weights'].view(-1) for t in target_list] + } + + target_dict['box_reg_targets'] = torch.cat(target_dict['box_reg_targets'], dim=0) + target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=0).view(-1) + target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=0).view(-1) + else: + target_dict = { + 'box_cls_labels': [t['box_cls_labels'].view(*feature_map_size, -1) for t in target_list], + 'box_reg_targets': [t['box_reg_targets'].view(*feature_map_size, -1, self.box_coder.code_size) + for t in target_list], + 'reg_weights': [t['reg_weights'].view(*feature_map_size, -1) for t in target_list] + } + target_dict['box_reg_targets'] = torch.cat( + target_dict['box_reg_targets'], dim=-2 + ).view(-1, self.box_coder.code_size) + + target_dict['box_cls_labels'] = torch.cat(target_dict['box_cls_labels'], dim=-1).view(-1) + target_dict['reg_weights'] = torch.cat(target_dict['reg_weights'], dim=-1).view(-1) + + bbox_targets.append(target_dict['box_reg_targets']) + cls_labels.append(target_dict['box_cls_labels']) + reg_weights.append(target_dict['reg_weights']) + + bbox_targets = torch.stack(bbox_targets, dim=0) + + cls_labels = torch.stack(cls_labels, dim=0) + reg_weights = torch.stack(reg_weights, dim=0) + all_targets_dict = { + 'box_cls_labels': cls_labels, + 'box_reg_targets': bbox_targets, + 'reg_weights': reg_weights + + } + return all_targets_dict + + def assign_targets_single(self, anchors, gt_boxes, gt_classes, matched_threshold=0.6, unmatched_threshold=0.45): + + num_anchors = anchors.shape[0] + num_gt = gt_boxes.shape[0] + + labels = torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1 + gt_ids = 
torch.ones((num_anchors,), dtype=torch.int32, device=anchors.device) * -1 + + if len(gt_boxes) > 0 and anchors.shape[0] > 0: + anchor_by_gt_overlap = iou3d_nms_utils.boxes_iou3d_gpu(anchors[:, 0:7], gt_boxes[:, 0:7]) \ + if self.match_height else box_utils.boxes3d_nearest_bev_iou(anchors[:, 0:7], gt_boxes[:, 0:7]) + + # NOTE: The speed of these two versions depends the environment and the number of anchors + # anchor_to_gt_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=1)).cuda() + anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(dim=1) + anchor_to_gt_max = anchor_by_gt_overlap[torch.arange(num_anchors, device=anchors.device), anchor_to_gt_argmax] + + # gt_to_anchor_argmax = torch.from_numpy(anchor_by_gt_overlap.cpu().numpy().argmax(axis=0)).cuda() + gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(dim=0) + gt_to_anchor_max = anchor_by_gt_overlap[gt_to_anchor_argmax, torch.arange(num_gt, device=anchors.device)] + empty_gt_mask = gt_to_anchor_max == 0 + gt_to_anchor_max[empty_gt_mask] = -1 + + anchors_with_max_overlap = (anchor_by_gt_overlap == gt_to_anchor_max).nonzero()[:, 0] + gt_inds_force = anchor_to_gt_argmax[anchors_with_max_overlap] + labels[anchors_with_max_overlap] = gt_classes[gt_inds_force] + gt_ids[anchors_with_max_overlap] = gt_inds_force.int() + + pos_inds = anchor_to_gt_max >= matched_threshold + gt_inds_over_thresh = anchor_to_gt_argmax[pos_inds] + labels[pos_inds] = gt_classes[gt_inds_over_thresh] + gt_ids[pos_inds] = gt_inds_over_thresh.int() + bg_inds = (anchor_to_gt_max < unmatched_threshold).nonzero()[:, 0] + else: + bg_inds = torch.arange(num_anchors, device=anchors.device) + + fg_inds = (labels > 0).nonzero()[:, 0] + + if self.pos_fraction is not None: + num_fg = int(self.pos_fraction * self.sample_size) + if len(fg_inds) > num_fg: + num_disabled = len(fg_inds) - num_fg + disable_inds = torch.randperm(len(fg_inds))[:num_disabled] + labels[disable_inds] = -1 + fg_inds = (labels > 0).nonzero()[:, 0] + + num_bg = self.sample_size - (labels > 0).sum() + if len(bg_inds) > num_bg: + enable_inds = bg_inds[torch.randint(0, len(bg_inds), size=(num_bg,))] + labels[enable_inds] = 0 + # bg_inds = torch.nonzero(labels == 0)[:, 0] + else: + if len(gt_boxes) == 0 or anchors.shape[0] == 0: + labels[:] = 0 + else: + labels[bg_inds] = 0 + labels[anchors_with_max_overlap] = gt_classes[gt_inds_force] + + bbox_targets = anchors.new_zeros((num_anchors, self.box_coder.code_size)) + if len(gt_boxes) > 0 and anchors.shape[0] > 0: + fg_gt_boxes = gt_boxes[anchor_to_gt_argmax[fg_inds], :] + fg_anchors = anchors[fg_inds, :] + bbox_targets[fg_inds, :] = self.box_coder.encode_torch(fg_gt_boxes, fg_anchors) + + reg_weights = anchors.new_zeros((num_anchors,)) + + if self.norm_by_num_examples: + num_examples = (labels >= 0).sum() + num_examples = num_examples if num_examples > 1.0 else 1.0 + reg_weights[labels > 0] = 1.0 / num_examples + else: + reg_weights[labels > 0] = 1.0 + + ret_dict = { + 'box_cls_labels': labels, + 'box_reg_targets': bbox_targets, + 'reg_weights': reg_weights, + } + return ret_dict diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/hungarian_assigner.py b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/hungarian_assigner.py new file mode 100644 index 000000000..15da89af0 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/target_assigner/hungarian_assigner.py @@ -0,0 +1,131 @@ +import torch +from scipy.optimize import linear_sum_assignment +from pcdet.ops.iou3d_nms import iou3d_nms_cuda + + +def 
height_overlaps(boxes1, boxes2):
+    """
+    Calculate height overlaps of two boxes.
+    """
+    boxes1_top_height = (boxes1[:, 2] + boxes1[:, 5]).view(-1, 1)
+    boxes1_bottom_height = boxes1[:, 2].view(-1, 1)
+    boxes2_top_height = (boxes2[:, 2] + boxes2[:, 5]).view(1, -1)
+    boxes2_bottom_height = boxes2[:, 2].view(1, -1)
+
+    highest_of_bottom = torch.max(boxes1_bottom_height, boxes2_bottom_height)
+    lowest_of_top = torch.min(boxes1_top_height, boxes2_top_height)
+    overlaps_h = torch.clamp(lowest_of_top - highest_of_bottom, min=0)
+    return overlaps_h
+
+
+def overlaps(boxes1, boxes2):
+    """
+    Calculate 3D overlaps of two boxes.
+    """
+    rows = len(boxes1)
+    cols = len(boxes2)
+    if rows * cols == 0:
+        return boxes1.new(rows, cols)
+
+    # height overlap
+    overlaps_h = height_overlaps(boxes1, boxes2)
+    boxes1_bev = boxes1[:, :7]
+    boxes2_bev = boxes2[:, :7]
+
+    # bev overlap
+    overlaps_bev = boxes1_bev.new_zeros(
+        (boxes1_bev.shape[0], boxes2_bev.shape[0])
+    ).cuda()  # (N, M)
+    iou3d_nms_cuda.boxes_overlap_bev_gpu(
+        boxes1_bev.contiguous().cuda(), boxes2_bev.contiguous().cuda(), overlaps_bev
+    )
+
+    # 3d overlaps
+    overlaps_3d = overlaps_bev.to(boxes1.device) * overlaps_h
+
+    volume1 = (boxes1[:, 3] * boxes1[:, 4] * boxes1[:, 5]).view(-1, 1)
+    volume2 = (boxes2[:, 3] * boxes2[:, 4] * boxes2[:, 5]).view(1, -1)
+
+    iou3d = overlaps_3d / torch.clamp(volume1 + volume2 - overlaps_3d, min=1e-8)
+
+    return iou3d
+
+
+class HungarianAssigner3D:
+    def __init__(self, cls_cost, reg_cost, iou_cost):
+        self.cls_cost = cls_cost
+        self.reg_cost = reg_cost
+        self.iou_cost = iou_cost
+
+    def focal_loss_cost(self, cls_pred, gt_labels):
+        weight = self.cls_cost.get('weight', 0.15)
+        alpha = self.cls_cost.get('alpha', 0.25)
+        gamma = self.cls_cost.get('gamma', 2.0)
+        eps = self.cls_cost.get('eps', 1e-12)
+
+        cls_pred = cls_pred.sigmoid()
+        neg_cost = -(1 - cls_pred + eps).log() * (1 - alpha) * cls_pred.pow(gamma)
+        pos_cost = -(cls_pred + eps).log() * alpha * (1 - cls_pred).pow(gamma)
+
+        cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]
+        return cls_cost * weight
+
+    def bevbox_cost(self, bboxes, gt_bboxes, point_cloud_range):
+        weight = self.reg_cost.get('weight', 0.25)
+
+        pc_start = bboxes.new(point_cloud_range[0:2])
+        pc_range = bboxes.new(point_cloud_range[3:5]) - bboxes.new(point_cloud_range[0:2])
+        # normalize the box center to [0, 1]
+        normalized_bboxes_xy = (bboxes[:, :2] - pc_start) / pc_range
+        normalized_gt_bboxes_xy = (gt_bboxes[:, :2] - pc_start) / pc_range
+        reg_cost = torch.cdist(normalized_bboxes_xy, normalized_gt_bboxes_xy, p=1)
+        return reg_cost * weight
+
+    def iou3d_cost(self, bboxes, gt_bboxes):
+        iou = overlaps(bboxes, gt_bboxes)
+        weight = self.iou_cost.get('weight', 0.25)
+        iou_cost = -iou
+        return iou_cost * weight, iou
+
+    def assign(self, bboxes, gt_bboxes, gt_labels, cls_pred, point_cloud_range):
+        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+
+        # 1. assign -1 by default
+        assigned_gt_inds = bboxes.new_full((num_bboxes,), -1, dtype=torch.long)
+        assigned_labels = bboxes.new_full((num_bboxes,), -1, dtype=torch.long)
+        if num_gts == 0 or num_bboxes == 0:
+            # no ground truth or no boxes, return an empty assignment
+            if num_gts == 0:
+                # no ground truth, assign everything to background
+                assigned_gt_inds[:] = 0
+            # bugfixed: return the same (assigned_gt_inds, max_overlaps) pair as the
+            # normal path below; `max_overlaps` was previously referenced here before assignment
+            return assigned_gt_inds, bboxes.new_zeros((num_bboxes,))
+
+        # 2. compute the weighted costs
+        cls_cost = self.focal_loss_cost(cls_pred[0].T, gt_labels)
+        reg_cost = self.bevbox_cost(bboxes, gt_bboxes, point_cloud_range)
+        iou_cost, iou = self.iou3d_cost(bboxes, gt_bboxes)
+
+        # weighted sum of the above three costs
+        cost = cls_cost + reg_cost + iou_cost
+
+        # 3. do Hungarian matching on CPU using linear_sum_assignment
+        cost = cost.detach().cpu()
+        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
+        matched_row_inds = torch.from_numpy(matched_row_inds).to(bboxes.device)
+        matched_col_inds = torch.from_numpy(matched_col_inds).to(bboxes.device)
+
+        # 4. assign backgrounds and foregrounds
+        # assign all indices to backgrounds first
+        assigned_gt_inds[:] = 0
+        # assign foregrounds based on matching results
+        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
+        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
+
+        max_overlaps = torch.zeros_like(iou.max(1).values)
+        max_overlaps[matched_row_inds] = iou[matched_row_inds, matched_col_inds]
+
+        return assigned_gt_inds, max_overlaps
\ No newline at end of file
diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/transfusion_head.py b/toolbox/openpcdet/pcdet/models/dense_heads/transfusion_head.py
new file mode 100644
index 000000000..742211b86
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/models/dense_heads/transfusion_head.py
@@ -0,0 +1,479 @@
+import copy
+import numpy as np
+import torch
+from torch import nn
+import torch.nn.functional as F
+from torch.nn.init import kaiming_normal_
+from ..model_utils.transfusion_utils import clip_sigmoid
+from ..model_utils.basic_block_2d import BasicBlock2D
+from ..model_utils.transfusion_utils import PositionEmbeddingLearned, TransformerDecoderLayer
+from .target_assigner.hungarian_assigner import HungarianAssigner3D
+from ...utils import loss_utils
+from ..model_utils import centernet_utils
+
+
+class SeparateHead_Transfusion(nn.Module):
+    def __init__(self, input_channels, head_channels, kernel_size, sep_head_dict, init_bias=-2.19, use_bias=False):
+        super().__init__()
+        self.sep_head_dict = sep_head_dict
+
+        for cur_name in self.sep_head_dict:
+            output_channels = self.sep_head_dict[cur_name]['out_channels']
+            num_conv = self.sep_head_dict[cur_name]['num_conv']
+
+            fc_list = []
+            for k in range(num_conv - 1):
+                fc_list.append(nn.Sequential(
+                    nn.Conv1d(input_channels, head_channels, kernel_size, stride=1, padding=kernel_size // 2, bias=use_bias),
+                    nn.BatchNorm1d(head_channels),
+                    nn.ReLU()
+                ))
+            fc_list.append(nn.Conv1d(head_channels, output_channels, kernel_size, stride=1, padding=kernel_size // 2, bias=True))
+            fc = nn.Sequential(*fc_list)
+            if 'hm' in cur_name:
+                fc[-1].bias.data.fill_(init_bias)
+            else:
+                for m in fc.modules():
+                    if isinstance(m, nn.Conv1d):  # bugfixed: the branches are built from Conv1d, so checking Conv2d never matched
+                        kaiming_normal_(m.weight.data)
+                        if hasattr(m, "bias") and m.bias is not None:
+                            nn.init.constant_(m.bias, 0)
+
+            self.__setattr__(cur_name, fc)
+
+    def forward(self, x):
+        ret_dict = {}
+        for cur_name in self.sep_head_dict:
+            ret_dict[cur_name] = self.__getattr__(cur_name)(x)
+
+        return ret_dict
+
+
+class TransFusionHead(nn.Module):
+    """
+    This module implements TransFusionHead.
+    The code is adapted from https://github.com/mit-han-lab/bevfusion/ with minimal modifications.
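+
+    Rough pipeline of the LiDAR branch: a class-wise BEV heatmap proposes the top
+    NUM_PROPOSALS object queries, one transformer decoder layer refines them against
+    the flattened BEV features, and SeparateHead_Transfusion predicts the per-query
+    regression targets; during training, predictions are matched to ground truth by
+    HungarianAssigner3D.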
+ """ + def __init__( + self, + model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size, predict_boxes_when_training=True, + ): + super(TransFusionHead, self).__init__() + + self.grid_size = grid_size + self.point_cloud_range = point_cloud_range + self.voxel_size = voxel_size + self.num_classes = num_class + + self.model_cfg = model_cfg + self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None) + self.dataset_name = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('DATASET', 'nuScenes') + + hidden_channel=self.model_cfg.HIDDEN_CHANNEL + self.num_proposals = self.model_cfg.NUM_PROPOSALS + self.bn_momentum = self.model_cfg.BN_MOMENTUM + self.nms_kernel_size = self.model_cfg.NMS_KERNEL_SIZE + + num_heads = self.model_cfg.NUM_HEADS + dropout = self.model_cfg.DROPOUT + activation = self.model_cfg.ACTIVATION + ffn_channel = self.model_cfg.FFN_CHANNEL + bias = self.model_cfg.get('USE_BIAS_BEFORE_NORM', False) + + loss_cls = self.model_cfg.LOSS_CONFIG.LOSS_CLS + self.use_sigmoid_cls = loss_cls.get("use_sigmoid", False) + if not self.use_sigmoid_cls: + self.num_classes += 1 + self.loss_cls = loss_utils.SigmoidFocalClassificationLoss(gamma=loss_cls.gamma,alpha=loss_cls.alpha) + self.loss_cls_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight'] + self.loss_bbox = loss_utils.L1Loss() + self.loss_bbox_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['bbox_weight'] + self.loss_heatmap = loss_utils.GaussianFocalLoss() + self.loss_heatmap_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['hm_weight'] + + self.code_size = 10 + + # a shared convolution + self.shared_conv = nn.Conv2d(in_channels=input_channels,out_channels=hidden_channel,kernel_size=3,padding=1) + layers = [] + layers.append(BasicBlock2D(hidden_channel,hidden_channel, kernel_size=3,padding=1,bias=bias)) + layers.append(nn.Conv2d(in_channels=hidden_channel,out_channels=num_class,kernel_size=3,padding=1)) + self.heatmap_head = nn.Sequential(*layers) + self.class_encoding = nn.Conv1d(num_class, hidden_channel, 1) + + # transformer decoder layers for object query with LiDAR feature + self.decoder = TransformerDecoderLayer(hidden_channel, num_heads, ffn_channel, dropout, activation, + self_posembed=PositionEmbeddingLearned(2, hidden_channel), + cross_posembed=PositionEmbeddingLearned(2, hidden_channel), + ) + # Prediction Head + heads = copy.deepcopy(self.model_cfg.SEPARATE_HEAD_CFG.HEAD_DICT) + heads['heatmap'] = dict(out_channels=self.num_classes, num_conv=self.model_cfg.NUM_HM_CONV) + self.prediction_head = SeparateHead_Transfusion(hidden_channel, 64, 1, heads, use_bias=bias) + + self.init_weights() + self.bbox_assigner = HungarianAssigner3D(**self.model_cfg.TARGET_ASSIGNER_CONFIG.HUNGARIAN_ASSIGNER) + + # Position Embedding for Cross-Attention, which is re-used during training + x_size = self.grid_size[0] // self.feature_map_stride + y_size = self.grid_size[1] // self.feature_map_stride + self.bev_pos = self.create_2D_grid(x_size, y_size) + + self.forward_ret_dict = {} + + def create_2D_grid(self, x_size, y_size): + meshgrid = [[0, x_size - 1, x_size], [0, y_size - 1, y_size]] + # NOTE: modified + batch_x, batch_y = torch.meshgrid( + *[torch.linspace(it[0], it[1], it[2]) for it in meshgrid] + ) + batch_x = batch_x + 0.5 + batch_y = batch_y + 0.5 + coord_base = torch.cat([batch_x[None], batch_y[None]], dim=0)[None] + coord_base = coord_base.view(1, 2, -1).permute(0, 2, 1) + return coord_base + + def init_weights(self): + # initialize transformer + for m in 
self.decoder.parameters(): + if m.dim() > 1: + nn.init.xavier_uniform_(m) + if hasattr(self, "query"): + nn.init.xavier_normal_(self.query) + self.init_bn_momentum() + + def init_bn_momentum(self): + for m in self.modules(): + if isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)): + m.momentum = self.bn_momentum + + def predict(self, inputs): + batch_size = inputs.shape[0] + lidar_feat = self.shared_conv(inputs) + + lidar_feat_flatten = lidar_feat.view( + batch_size, lidar_feat.shape[1], -1 + ) + bev_pos = self.bev_pos.repeat(batch_size, 1, 1).to(lidar_feat.device) + + # query initialization + dense_heatmap = self.heatmap_head(lidar_feat) + heatmap = dense_heatmap.detach().sigmoid() + padding = self.nms_kernel_size // 2 + local_max = torch.zeros_like(heatmap) + local_max_inner = F.max_pool2d( + heatmap, kernel_size=self.nms_kernel_size, stride=1, padding=0 + ) + local_max[:, :, padding:(-padding), padding:(-padding)] = local_max_inner + # for Pedestrian & Traffic_cone in nuScenes + if self.dataset_name == "nuScenes": + local_max[ :, 8, ] = F.max_pool2d(heatmap[:, 8], kernel_size=1, stride=1, padding=0) + local_max[ :, 9, ] = F.max_pool2d(heatmap[:, 9], kernel_size=1, stride=1, padding=0) + # for Pedestrian & Cyclist in Waymo + elif self.dataset_name == "Waymo": + local_max[ :, 1, ] = F.max_pool2d(heatmap[:, 1], kernel_size=1, stride=1, padding=0) + local_max[ :, 2, ] = F.max_pool2d(heatmap[:, 2], kernel_size=1, stride=1, padding=0) + heatmap = heatmap * (heatmap == local_max) + heatmap = heatmap.view(batch_size, heatmap.shape[1], -1) + + # top num_proposals among all classes + top_proposals = heatmap.view(batch_size, -1).argsort(dim=-1, descending=True)[ + ..., : self.num_proposals + ] + top_proposals_class = top_proposals // heatmap.shape[-1] + top_proposals_index = top_proposals % heatmap.shape[-1] + query_feat = lidar_feat_flatten.gather( + index=top_proposals_index[:, None, :].expand(-1, lidar_feat_flatten.shape[1], -1), + dim=-1, + ) + self.query_labels = top_proposals_class + + # add category embedding + one_hot = F.one_hot(top_proposals_class, num_classes=self.num_classes).permute(0, 2, 1) + + query_cat_encoding = self.class_encoding(one_hot.float()) + query_feat += query_cat_encoding + + query_pos = bev_pos.gather( + index=top_proposals_index[:, None, :].permute(0, 2, 1).expand(-1, -1, bev_pos.shape[-1]), + dim=1, + ) + # convert to xy + query_pos = query_pos.flip(dims=[-1]) + bev_pos = bev_pos.flip(dims=[-1]) + + query_feat = self.decoder( + query_feat, lidar_feat_flatten, query_pos, bev_pos + ) + res_layer = self.prediction_head(query_feat) + res_layer["center"] = res_layer["center"] + query_pos.permute(0, 2, 1) + + res_layer["query_heatmap_score"] = heatmap.gather( + index=top_proposals_index[:, None, :].expand(-1, self.num_classes, -1), + dim=-1, + ) + res_layer["dense_heatmap"] = dense_heatmap + + return res_layer + + def forward(self, batch_dict): + feats = batch_dict['spatial_features_2d'] + res = self.predict(feats) + if not self.training: + bboxes = self.get_bboxes(res) + batch_dict['final_box_dicts'] = bboxes + else: + gt_boxes = batch_dict['gt_boxes'] + gt_bboxes_3d = gt_boxes[...,:-1] + gt_labels_3d = gt_boxes[...,-1].long() - 1 + loss, tb_dict = self.loss(gt_bboxes_3d, gt_labels_3d, res) + batch_dict['loss'] = loss + batch_dict['tb_dict'] = tb_dict + return batch_dict + + def get_targets(self, gt_bboxes_3d, gt_labels_3d, pred_dicts): + assign_results = [] + for batch_idx in range(len(gt_bboxes_3d)): + pred_dict = {} + for key in pred_dicts.keys(): + pred_dict[key] = 
pred_dicts[key][batch_idx : batch_idx + 1] + gt_bboxes = gt_bboxes_3d[batch_idx] + valid_idx = [] + # filter empty boxes + for i in range(len(gt_bboxes)): + if gt_bboxes[i][3] > 0 and gt_bboxes[i][4] > 0: + valid_idx.append(i) + assign_result = self.get_targets_single(gt_bboxes[valid_idx], gt_labels_3d[batch_idx][valid_idx], pred_dict) + assign_results.append(assign_result) + + res_tuple = tuple(map(list, zip(*assign_results))) + labels = torch.cat(res_tuple[0], dim=0) + label_weights = torch.cat(res_tuple[1], dim=0) + bbox_targets = torch.cat(res_tuple[2], dim=0) + bbox_weights = torch.cat(res_tuple[3], dim=0) + num_pos = np.sum(res_tuple[4]) + matched_ious = np.mean(res_tuple[5]) + heatmap = torch.cat(res_tuple[6], dim=0) + return labels, label_weights, bbox_targets, bbox_weights, num_pos, matched_ious, heatmap + + + def get_targets_single(self, gt_bboxes_3d, gt_labels_3d, preds_dict): + + num_proposals = preds_dict["center"].shape[-1] + score = copy.deepcopy(preds_dict["heatmap"].detach()) + center = copy.deepcopy(preds_dict["center"].detach()) + height = copy.deepcopy(preds_dict["height"].detach()) + dim = copy.deepcopy(preds_dict["dim"].detach()) + rot = copy.deepcopy(preds_dict["rot"].detach()) + if "vel" in preds_dict.keys(): + vel = copy.deepcopy(preds_dict["vel"].detach()) + else: + vel = None + + boxes_dict = self.decode_bbox(score, rot, dim, center, height, vel) + bboxes_tensor = boxes_dict[0]["pred_boxes"] + gt_bboxes_tensor = gt_bboxes_3d.to(score.device) + + assigned_gt_inds, ious = self.bbox_assigner.assign( + bboxes_tensor, gt_bboxes_tensor, gt_labels_3d, + score, self.point_cloud_range, + ) + pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero(assigned_gt_inds == 0, as_tuple=False).squeeze(-1).unique() + pos_assigned_gt_inds = assigned_gt_inds[pos_inds] - 1 + if gt_bboxes_3d.numel() == 0: + assert pos_inds.numel() == 0 + pos_gt_bboxes = torch.empty_like(gt_bboxes_3d).view(-1, 9) + else: + pos_gt_bboxes = gt_bboxes_3d[pos_assigned_gt_inds.long(), :] + + # create target for loss computation + bbox_targets = torch.zeros([num_proposals, self.code_size]).to(center.device) + bbox_weights = torch.zeros([num_proposals, self.code_size]).to(center.device) + ious = torch.clamp(ious, min=0.0, max=1.0) + labels = bboxes_tensor.new_zeros(num_proposals, dtype=torch.long) + label_weights = bboxes_tensor.new_zeros(num_proposals, dtype=torch.long) + + if gt_labels_3d is not None: # default label is -1 + labels += self.num_classes + + # both pos and neg have classification loss, only pos has regression and iou loss + if len(pos_inds) > 0: + pos_bbox_targets = self.encode_bbox(pos_gt_bboxes) + bbox_targets[pos_inds, :] = pos_bbox_targets + bbox_weights[pos_inds, :] = 1.0 + + if gt_labels_3d is None: + labels[pos_inds] = 1 + else: + labels[pos_inds] = gt_labels_3d[pos_assigned_gt_inds] + label_weights[pos_inds] = 1.0 + + if len(neg_inds) > 0: + label_weights[neg_inds] = 1.0 + + # compute dense heatmap targets + device = labels.device + target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG + feature_map_size = (self.grid_size[:2] // self.feature_map_stride) + heatmap = gt_bboxes_3d.new_zeros(self.num_classes, feature_map_size[1], feature_map_size[0]) + for idx in range(len(gt_bboxes_3d)): + width = gt_bboxes_3d[idx][3] + length = gt_bboxes_3d[idx][4] + width = width / self.voxel_size[0] / self.feature_map_stride + length = length / self.voxel_size[1] / self.feature_map_stride + if width > 0 and length > 0: + radius = 
centernet_utils.gaussian_radius(length.view(-1), width.view(-1), target_assigner_cfg.GAUSSIAN_OVERLAP)[0] + radius = max(target_assigner_cfg.MIN_RADIUS, int(radius)) + x, y = gt_bboxes_3d[idx][0], gt_bboxes_3d[idx][1] + + coor_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / self.feature_map_stride + coor_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / self.feature_map_stride + + center = torch.tensor([coor_x, coor_y], dtype=torch.float32, device=device) + center_int = center.to(torch.int32) + centernet_utils.draw_gaussian_to_heatmap(heatmap[gt_labels_3d[idx]], center_int, radius) + + + mean_iou = ious[pos_inds].sum() / max(len(pos_inds), 1) + return (labels[None], label_weights[None], bbox_targets[None], bbox_weights[None], int(pos_inds.shape[0]), float(mean_iou), heatmap[None]) + + def loss(self, gt_bboxes_3d, gt_labels_3d, pred_dicts, **kwargs): + + labels, label_weights, bbox_targets, bbox_weights, num_pos, matched_ious, heatmap = \ + self.get_targets(gt_bboxes_3d, gt_labels_3d, pred_dicts) + loss_dict = dict() + loss_all = 0 + + # compute heatmap loss + loss_heatmap = self.loss_heatmap( + clip_sigmoid(pred_dicts["dense_heatmap"]), + heatmap, + ).sum() / max(heatmap.eq(1).float().sum().item(), 1) + loss_dict["loss_heatmap"] = loss_heatmap.item() * self.loss_heatmap_weight + loss_all += loss_heatmap * self.loss_heatmap_weight + + labels = labels.reshape(-1) + label_weights = label_weights.reshape(-1) + cls_score = pred_dicts["heatmap"].permute(0, 2, 1).reshape(-1, self.num_classes) + + one_hot_targets = torch.zeros(*list(labels.shape), self.num_classes+1, dtype=cls_score.dtype, device=labels.device) + one_hot_targets.scatter_(-1, labels.unsqueeze(dim=-1).long(), 1.0) + one_hot_targets = one_hot_targets[..., :-1] + loss_cls = self.loss_cls( + cls_score, one_hot_targets, label_weights + ).sum() / max(num_pos, 1) + + preds = torch.cat([pred_dicts[head_name] for head_name in self.model_cfg.SEPARATE_HEAD_CFG.HEAD_ORDER], dim=1).permute(0, 2, 1) + code_weights = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'] + reg_weights = bbox_weights * bbox_weights.new_tensor(code_weights) + + loss_bbox = self.loss_bbox(preds, bbox_targets) + loss_bbox = (loss_bbox * reg_weights).sum() / max(num_pos, 1) + + loss_dict["loss_cls"] = loss_cls.item() * self.loss_cls_weight + loss_dict["loss_bbox"] = loss_bbox.item() * self.loss_bbox_weight + loss_all = loss_all + loss_cls * self.loss_cls_weight + loss_bbox * self.loss_bbox_weight + + loss_dict[f"matched_ious"] = loss_cls.new_tensor(matched_ious) + loss_dict['loss_trans'] = loss_all + + return loss_all,loss_dict + + def encode_bbox(self, bboxes): + code_size = 10 + targets = torch.zeros([bboxes.shape[0], code_size]).to(bboxes.device) + targets[:, 0] = (bboxes[:, 0] - self.point_cloud_range[0]) / (self.feature_map_stride * self.voxel_size[0]) + targets[:, 1] = (bboxes[:, 1] - self.point_cloud_range[1]) / (self.feature_map_stride * self.voxel_size[1]) + targets[:, 3:6] = bboxes[:, 3:6].log() + targets[:, 2] = bboxes[:, 2] + targets[:, 6] = torch.sin(bboxes[:, 6]) + targets[:, 7] = torch.cos(bboxes[:, 6]) + if code_size == 10: + targets[:, 8:10] = bboxes[:, 7:] + return targets + + def decode_bbox(self, heatmap, rot, dim, center, height, vel, filter=False): + + post_process_cfg = self.model_cfg.POST_PROCESSING + score_thresh = post_process_cfg.SCORE_THRESH + post_center_range = post_process_cfg.POST_CENTER_RANGE + post_center_range = torch.tensor(post_center_range).cuda().float() + # class label + final_preds = heatmap.max(1, 
keepdims=False).indices + final_scores = heatmap.max(1, keepdims=False).values + + center[:, 0, :] = center[:, 0, :] * self.feature_map_stride * self.voxel_size[0] + self.point_cloud_range[0] + center[:, 1, :] = center[:, 1, :] * self.feature_map_stride * self.voxel_size[1] + self.point_cloud_range[1] + dim = dim.exp() + rots, rotc = rot[:, 0:1, :], rot[:, 1:2, :] + rot = torch.atan2(rots, rotc) + + if vel is None: + final_box_preds = torch.cat([center, height, dim, rot], dim=1).permute(0, 2, 1) + else: + final_box_preds = torch.cat([center, height, dim, rot, vel], dim=1).permute(0, 2, 1) + + predictions_dicts = [] + for i in range(heatmap.shape[0]): + boxes3d = final_box_preds[i] + scores = final_scores[i] + labels = final_preds[i] + predictions_dict = { + 'pred_boxes': boxes3d, + 'pred_scores': scores, + 'pred_labels': labels + } + predictions_dicts.append(predictions_dict) + + if filter is False: + return predictions_dicts + + thresh_mask = final_scores > score_thresh + mask = (final_box_preds[..., :3] >= post_center_range[:3]).all(2) + mask &= (final_box_preds[..., :3] <= post_center_range[3:]).all(2) + + predictions_dicts = [] + for i in range(heatmap.shape[0]): + cmask = mask[i, :] + cmask &= thresh_mask[i] + + boxes3d = final_box_preds[i, cmask] + scores = final_scores[i, cmask] + labels = final_preds[i, cmask] + predictions_dict = { + 'pred_boxes': boxes3d, + 'pred_scores': scores, + 'pred_labels': labels, + } + + predictions_dicts.append(predictions_dict) + + return predictions_dicts + + def get_bboxes(self, preds_dicts): + + batch_size = preds_dicts["heatmap"].shape[0] + batch_score = preds_dicts["heatmap"].sigmoid() + one_hot = F.one_hot( + self.query_labels, num_classes=self.num_classes + ).permute(0, 2, 1) + batch_score = batch_score * preds_dicts["query_heatmap_score"] * one_hot + batch_center = preds_dicts["center"] + batch_height = preds_dicts["height"] + batch_dim = preds_dicts["dim"] + batch_rot = preds_dicts["rot"] + batch_vel = None + if "vel" in preds_dicts: + batch_vel = preds_dicts["vel"] + + ret_dict = self.decode_bbox( + batch_score, batch_rot, batch_dim, + batch_center, batch_height, batch_vel, + filter=True, + ) + for k in range(batch_size): + ret_dict[k]['pred_labels'] = ret_dict[k]['pred_labels'].int() + 1 + + return ret_dict diff --git a/toolbox/openpcdet/pcdet/models/dense_heads/voxelnext_head.py b/toolbox/openpcdet/pcdet/models/dense_heads/voxelnext_head.py new file mode 100644 index 000000000..e2f234f73 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/dense_heads/voxelnext_head.py @@ -0,0 +1,559 @@ +import numpy as np +import torch +import torch.nn as nn +from torch.nn.init import kaiming_normal_ +from ..model_utils import centernet_utils +from ..model_utils import model_nms_utils +from ...utils import loss_utils +from ...utils.spconv_utils import replace_feature, spconv +import copy +from easydict import EasyDict + + +class SeparateHead(nn.Module): + def __init__(self, input_channels, sep_head_dict, kernel_size, init_bias=-2.19, use_bias=False): + super().__init__() + self.sep_head_dict = sep_head_dict + + for cur_name in self.sep_head_dict: + output_channels = self.sep_head_dict[cur_name]['out_channels'] + num_conv = self.sep_head_dict[cur_name]['num_conv'] + + fc_list = [] + for k in range(num_conv - 1): + fc_list.append(spconv.SparseSequential( + spconv.SubMConv2d(input_channels, input_channels, kernel_size, padding=int(kernel_size//2), bias=use_bias, indice_key=cur_name), + nn.BatchNorm1d(input_channels), + nn.ReLU() + )) + 
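# final 1x1 submanifold conv maps the hidden features to this head's output channels + 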
fc_list.append(spconv.SubMConv2d(input_channels, output_channels, 1, bias=True, indice_key=cur_name+'out')) + fc = nn.Sequential(*fc_list) + if 'hm' in cur_name: + fc[-1].bias.data.fill_(init_bias) + else: + for m in fc.modules(): + if isinstance(m, spconv.SubMConv2d): + kaiming_normal_(m.weight.data) + if hasattr(m, "bias") and m.bias is not None: + nn.init.constant_(m.bias, 0) + + self.__setattr__(cur_name, fc) + + def forward(self, x): + ret_dict = {} + for cur_name in self.sep_head_dict: + ret_dict[cur_name] = self.__getattr__(cur_name)(x).features + + return ret_dict + + +class VoxelNeXtHead(nn.Module): + def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, point_cloud_range, voxel_size, + predict_boxes_when_training=False): + super().__init__() + self.model_cfg = model_cfg + self.num_class = num_class + self.grid_size = grid_size + self.point_cloud_range = torch.Tensor(point_cloud_range).cuda() + self.voxel_size = torch.Tensor(voxel_size).cuda() + self.feature_map_stride = self.model_cfg.TARGET_ASSIGNER_CONFIG.get('FEATURE_MAP_STRIDE', None) + + self.class_names = class_names + self.class_names_each_head = [] + self.class_id_mapping_each_head = [] + self.gaussian_ratio = self.model_cfg.get('GAUSSIAN_RATIO', 1) + self.gaussian_type = self.model_cfg.get('GAUSSIAN_TYPE', ['nearst', 'gt_center']) + # The iou branch is only used for Waymo dataset + self.iou_branch = self.model_cfg.get('IOU_BRANCH', False) + if self.iou_branch: + self.rectifier = self.model_cfg.get('RECTIFIER') + nms_configs = self.model_cfg.POST_PROCESSING.NMS_CONFIG + self.nms_configs = [EasyDict(NMS_TYPE=nms_configs.NMS_TYPE, + NMS_THRESH=nms_configs.NMS_THRESH[i], + NMS_PRE_MAXSIZE=nms_configs.NMS_PRE_MAXSIZE[i], + NMS_POST_MAXSIZE=nms_configs.NMS_POST_MAXSIZE[i]) for i in range(num_class)] + + self.double_flip = self.model_cfg.get('DOUBLE_FLIP', False) + for cur_class_names in self.model_cfg.CLASS_NAMES_EACH_HEAD: + self.class_names_each_head.append([x for x in cur_class_names if x in class_names]) + cur_class_id_mapping = torch.from_numpy(np.array( + [self.class_names.index(x) for x in cur_class_names if x in class_names] + )).cuda() + self.class_id_mapping_each_head.append(cur_class_id_mapping) + + total_classes = sum([len(x) for x in self.class_names_each_head]) + assert total_classes == len(self.class_names), f'class_names_each_head={self.class_names_each_head}' + + kernel_size_head = self.model_cfg.get('KERNEL_SIZE_HEAD', 3) + + self.heads_list = nn.ModuleList() + self.separate_head_cfg = self.model_cfg.SEPARATE_HEAD_CFG + for idx, cur_class_names in enumerate(self.class_names_each_head): + cur_head_dict = copy.deepcopy(self.separate_head_cfg.HEAD_DICT) + cur_head_dict['hm'] = dict(out_channels=len(cur_class_names), num_conv=self.model_cfg.NUM_HM_CONV) + self.heads_list.append( + SeparateHead( + input_channels=self.model_cfg.get('SHARED_CONV_CHANNEL', 128), + sep_head_dict=cur_head_dict, + kernel_size=kernel_size_head, + init_bias=-2.19, + use_bias=self.model_cfg.get('USE_BIAS_BEFORE_NORM', False), + ) + ) + self.predict_boxes_when_training = predict_boxes_when_training + self.forward_ret_dict = {} + self.build_losses() + + def build_losses(self): + self.add_module('hm_loss_func', loss_utils.FocalLossSparse()) + self.add_module('reg_loss_func', loss_utils.RegLossSparse()) + if self.iou_branch: + self.add_module('crit_iou', loss_utils.IouLossSparse()) + self.add_module('crit_iou_reg', loss_utils.IouRegLossSparse()) + + def assign_targets(self, gt_boxes, num_voxels, spatial_indices, 
spatial_shape): + """ + Args: + gt_boxes: (B, M, 8) + Returns: + """ + target_assigner_cfg = self.model_cfg.TARGET_ASSIGNER_CONFIG + + batch_size = gt_boxes.shape[0] + ret_dict = { + 'heatmaps': [], + 'target_boxes': [], + 'inds': [], + 'masks': [], + 'heatmap_masks': [], + 'gt_boxes': [] + } + + all_names = np.array(['bg', *self.class_names]) + for idx, cur_class_names in enumerate(self.class_names_each_head): + heatmap_list, target_boxes_list, inds_list, masks_list, gt_boxes_list = [], [], [], [], [] + for bs_idx in range(batch_size): + cur_gt_boxes = gt_boxes[bs_idx] + gt_class_names = all_names[cur_gt_boxes[:, -1].cpu().long().numpy()] + + gt_boxes_single_head = [] + + for idx, name in enumerate(gt_class_names): + if name not in cur_class_names: + continue + temp_box = cur_gt_boxes[idx] + temp_box[-1] = cur_class_names.index(name) + 1 + gt_boxes_single_head.append(temp_box[None, :]) + + if len(gt_boxes_single_head) == 0: + gt_boxes_single_head = cur_gt_boxes[:0, :] + else: + gt_boxes_single_head = torch.cat(gt_boxes_single_head, dim=0) + + heatmap, ret_boxes, inds, mask = self.assign_target_of_single_head( + num_classes=len(cur_class_names), gt_boxes=gt_boxes_single_head, + num_voxels=num_voxels[bs_idx], spatial_indices=spatial_indices[bs_idx], + spatial_shape=spatial_shape, + feature_map_stride=target_assigner_cfg.FEATURE_MAP_STRIDE, + num_max_objs=target_assigner_cfg.NUM_MAX_OBJS, + gaussian_overlap=target_assigner_cfg.GAUSSIAN_OVERLAP, + min_radius=target_assigner_cfg.MIN_RADIUS, + ) + heatmap_list.append(heatmap.to(gt_boxes_single_head.device)) + target_boxes_list.append(ret_boxes.to(gt_boxes_single_head.device)) + inds_list.append(inds.to(gt_boxes_single_head.device)) + masks_list.append(mask.to(gt_boxes_single_head.device)) + gt_boxes_list.append(gt_boxes_single_head[:, :-1]) + + ret_dict['heatmaps'].append(torch.cat(heatmap_list, dim=1).permute(1, 0)) + ret_dict['target_boxes'].append(torch.stack(target_boxes_list, dim=0)) + ret_dict['inds'].append(torch.stack(inds_list, dim=0)) + ret_dict['masks'].append(torch.stack(masks_list, dim=0)) + ret_dict['gt_boxes'].append(gt_boxes_list) + + return ret_dict + + def distance(self, voxel_indices, center): + distances = ((voxel_indices - center.unsqueeze(0))**2).sum(-1) + return distances + + def assign_target_of_single_head( + self, num_classes, gt_boxes, num_voxels, spatial_indices, spatial_shape, feature_map_stride, num_max_objs=500, + gaussian_overlap=0.1, min_radius=2 + ): + """ + Args: + gt_boxes: (N, 8) + feature_map_size: (2), [x, y] + + Returns: + + """ + heatmap = gt_boxes.new_zeros(num_classes, num_voxels) + + ret_boxes = gt_boxes.new_zeros((num_max_objs, gt_boxes.shape[-1] - 1 + 1)) + inds = gt_boxes.new_zeros(num_max_objs).long() + mask = gt_boxes.new_zeros(num_max_objs).long() + + x, y, z = gt_boxes[:, 0], gt_boxes[:, 1], gt_boxes[:, 2] + coord_x = (x - self.point_cloud_range[0]) / self.voxel_size[0] / feature_map_stride + coord_y = (y - self.point_cloud_range[1]) / self.voxel_size[1] / feature_map_stride + + coord_x = torch.clamp(coord_x, min=0, max=spatial_shape[1] - 0.5) # bugfixed: 1e-6 does not work for center.int() + coord_y = torch.clamp(coord_y, min=0, max=spatial_shape[0] - 0.5) # + + center = torch.cat((coord_x[:, None], coord_y[:, None]), dim=-1) + center_int = center.int() + center_int_float = center_int.float() + + dx, dy, dz = gt_boxes[:, 3], gt_boxes[:, 4], gt_boxes[:, 5] + dx = dx / self.voxel_size[0] / feature_map_stride + dy = dy / self.voxel_size[1] / feature_map_stride + + radius = 
centernet_utils.gaussian_radius(dx, dy, min_overlap=gaussian_overlap) + radius = torch.clamp_min(radius.int(), min=min_radius) + + for k in range(min(num_max_objs, gt_boxes.shape[0])): + if dx[k] <= 0 or dy[k] <= 0: + continue + + if not (0 <= center_int[k][0] <= spatial_shape[1] and 0 <= center_int[k][1] <= spatial_shape[0]): + continue + + cur_class_id = (gt_boxes[k, -1] - 1).long() + distance = self.distance(spatial_indices, center[k]) + inds[k] = distance.argmin() + mask[k] = 1 + + if 'gt_center' in self.gaussian_type: + centernet_utils.draw_gaussian_to_heatmap_voxels(heatmap[cur_class_id], distance, radius[k].item() * self.gaussian_ratio) + + if 'nearst' in self.gaussian_type: + centernet_utils.draw_gaussian_to_heatmap_voxels(heatmap[cur_class_id], self.distance(spatial_indices, spatial_indices[inds[k]]), radius[k].item() * self.gaussian_ratio) + + ret_boxes[k, 0:2] = center[k] - spatial_indices[inds[k]][:2] + ret_boxes[k, 2] = z[k] + ret_boxes[k, 3:6] = gt_boxes[k, 3:6].log() + ret_boxes[k, 6] = torch.cos(gt_boxes[k, 6]) + ret_boxes[k, 7] = torch.sin(gt_boxes[k, 6]) + if gt_boxes.shape[1] > 8: + ret_boxes[k, 8:] = gt_boxes[k, 7:-1] + + return heatmap, ret_boxes, inds, mask + + def sigmoid(self, x): + y = torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4) + return y + + def get_loss(self): + pred_dicts = self.forward_ret_dict['pred_dicts'] + target_dicts = self.forward_ret_dict['target_dicts'] + batch_index = self.forward_ret_dict['batch_index'] + + tb_dict = {} + loss = 0 + batch_indices = self.forward_ret_dict['voxel_indices'][:, 0] + spatial_indices = self.forward_ret_dict['voxel_indices'][:, 1:] + + for idx, pred_dict in enumerate(pred_dicts): + pred_dict['hm'] = self.sigmoid(pred_dict['hm']) + hm_loss = self.hm_loss_func(pred_dict['hm'], target_dicts['heatmaps'][idx]) + hm_loss *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['cls_weight'] + + target_boxes = target_dicts['target_boxes'][idx] + pred_boxes = torch.cat([pred_dict[head_name] for head_name in self.separate_head_cfg.HEAD_ORDER], dim=1) + + reg_loss = self.reg_loss_func( + pred_boxes, target_dicts['masks'][idx], target_dicts['inds'][idx], target_boxes, batch_index + ) + loc_loss = (reg_loss * reg_loss.new_tensor(self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['code_weights'])).sum() + loc_loss = loc_loss * self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + tb_dict['hm_loss_head_%d' % idx] = hm_loss.item() + tb_dict['loc_loss_head_%d' % idx] = loc_loss.item() + if self.iou_branch: + batch_box_preds = self._get_predicted_boxes(pred_dict, spatial_indices) + pred_boxes_for_iou = batch_box_preds.detach() + iou_loss = self.crit_iou(pred_dict['iou'], target_dicts['masks'][idx], target_dicts['inds'][idx], + pred_boxes_for_iou, target_dicts['gt_boxes'][idx], batch_indices) + + iou_reg_loss = self.crit_iou_reg(batch_box_preds, target_dicts['masks'][idx], target_dicts['inds'][idx], + target_dicts['gt_boxes'][idx], batch_indices) + iou_weight = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['iou_weight'] if 'iou_weight' in self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS else self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + iou_reg_loss = iou_reg_loss * iou_weight #self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['loc_weight'] + + loss += (hm_loss + loc_loss + iou_loss + iou_reg_loss) + tb_dict['iou_loss_head_%d' % idx] = iou_loss.item() + tb_dict['iou_reg_loss_head_%d' % idx] = iou_reg_loss.item() + else: + loss += hm_loss + loc_loss + + tb_dict['rpn_loss'] = loss.item() + return loss, tb_dict + + def _get_predicted_boxes(self, pred_dict, spatial_indices): + 
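"""Decode sparse per-voxel regression outputs into world-frame boxes (x, y, z, dx, dy, dz, heading).""" + 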
center = pred_dict['center'] + center_z = pred_dict['center_z'] + # clamp log-dims to [-5, 5] before exp to avoid numerical overflow + dim = torch.exp(torch.clamp(pred_dict['dim'], min=-5, max=5)) + rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1) + rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1) + angle = torch.atan2(rot_sin, rot_cos) + xs = (spatial_indices[:, 1:2] + center[:, 0:1]) * self.feature_map_stride * self.voxel_size[0] + self.point_cloud_range[0] + ys = (spatial_indices[:, 0:1] + center[:, 1:2]) * self.feature_map_stride * self.voxel_size[1] + self.point_cloud_range[1] + + box_part_list = [xs, ys, center_z, dim, angle] + pred_box = torch.cat(box_part_list, dim=-1) + return pred_box + + def rotate_class_specific_nms_iou(self, boxes, scores, iou_preds, labels, rectifier, nms_configs): + """ + :param boxes: (N, 7+C) [x, y, z, dx, dy, dz, heading, ...] + :param scores: (N) + :param iou_preds: (N, 1) predicted IoUs used to rectify the scores + :param labels: (N) zero-based class indices + :param rectifier: per-class exponent blending score and IoU + :param nms_configs: per-class NMS config + :return: + """ + assert isinstance(rectifier, list) + + box_preds_list, scores_list, labels_list = [], [], [] + for cls in range(self.num_class): + mask = labels == cls + boxes_cls = boxes[mask] + scores_cls = torch.pow(scores[mask], 1 - rectifier[cls]) * torch.pow(iou_preds[mask].squeeze(-1), rectifier[cls]) + labels_cls = labels[mask] + + selected, selected_scores = model_nms_utils.class_agnostic_nms(box_scores=scores_cls, box_preds=boxes_cls, + nms_config=nms_configs[cls], score_thresh=None) + + box_preds_list.append(boxes_cls[selected]) + scores_list.append(scores_cls[selected]) + labels_list.append(labels_cls[selected]) + + return torch.cat(box_preds_list, dim=0), torch.cat(scores_list, dim=0), torch.cat(labels_list, dim=0) + + def merge_double_flip(self, pred_dict, batch_size, voxel_indices, spatial_shape): + # spatial_shape (Z, Y, X) + pred_dict['hm'] = pred_dict['hm'].sigmoid() + pred_dict['dim'] = pred_dict['dim'].exp() + + batch_indices = voxel_indices[:, 0] + spatial_indices = voxel_indices[:, 1:] + + pred_dict_ = {k: [] for k in pred_dict.keys()} + counts = [] + spatial_indices_ = [] + for bs_idx in range(batch_size): + spatial_indices_batch = [] + pred_dict_batch = {k: [] for k in pred_dict.keys()} + for i in range(4): + bs_indices = batch_indices == (bs_idx * 4 + i) + if i in [1, 3]: + spatial_indices[bs_indices, 0] = spatial_shape[0] - spatial_indices[bs_indices, 0] + if i in [2, 3]: + spatial_indices[bs_indices, 1] = spatial_shape[1] - spatial_indices[bs_indices, 1] + + if i == 1: + pred_dict['center'][bs_indices, 1] = - pred_dict['center'][bs_indices, 1] + pred_dict['rot'][bs_indices, 1] *= -1 + pred_dict['vel'][bs_indices, 1] *= -1 + + if i == 2: + pred_dict['center'][bs_indices, 0] = - pred_dict['center'][bs_indices, 0] + pred_dict['rot'][bs_indices, 0] *= -1 + pred_dict['vel'][bs_indices, 0] *= -1 + + if i == 3: + pred_dict['center'][bs_indices, 0] = - pred_dict['center'][bs_indices, 0] + pred_dict['center'][bs_indices, 1] = - pred_dict['center'][bs_indices, 1] + + pred_dict['rot'][bs_indices, 1] *= -1 + pred_dict['rot'][bs_indices, 0] *= -1 + + pred_dict['vel'][bs_indices] *= -1 + + spatial_indices_batch.append(spatial_indices[bs_indices]) + + for k in pred_dict.keys(): + pred_dict_batch[k].append(pred_dict[k][bs_indices]) + + spatial_indices_batch = torch.cat(spatial_indices_batch) + + spatial_indices_unique, _inv, count = torch.unique(spatial_indices_batch, dim=0, return_inverse=True, + return_counts=True) + spatial_indices_.append(spatial_indices_unique) + counts.append(count) + for k in pred_dict.keys(): + pred_dict_batch[k] = torch.cat(pred_dict_batch[k]) + features_unique = pred_dict_batch[k].new_zeros( + 
(spatial_indices_unique.shape[0], pred_dict_batch[k].shape[1])) + features_unique.index_add_(0, _inv, pred_dict_batch[k]) + pred_dict_[k].append(features_unique) + + for k in pred_dict.keys(): + pred_dict_[k] = torch.cat(pred_dict_[k]) + counts = torch.cat(counts).unsqueeze(-1).float() + voxel_indices_ = torch.cat([torch.cat( + [torch.full((indices.shape[0], 1), i, device=indices.device, dtype=indices.dtype), indices], dim=1 + ) for i, indices in enumerate(spatial_indices_)]) + + batch_hm = pred_dict_['hm'] + batch_center = pred_dict_['center'] + batch_center_z = pred_dict_['center_z'] + batch_dim = pred_dict_['dim'] + batch_rot_cos = pred_dict_['rot'][:, 0].unsqueeze(dim=1) + batch_rot_sin = pred_dict_['rot'][:, 1].unsqueeze(dim=1) + batch_vel = pred_dict_['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None + + batch_hm /= counts + batch_center /= counts + batch_center_z /= counts + batch_dim /= counts + batch_rot_cos /= counts + batch_rot_sin /= counts + + if not batch_vel is None: + batch_vel /= counts + + return batch_hm, batch_center, batch_center_z, batch_dim, batch_rot_cos, batch_rot_sin, batch_vel, None, voxel_indices_ + + def generate_predicted_boxes(self, batch_size, pred_dicts, voxel_indices, spatial_shape): + post_process_cfg = self.model_cfg.POST_PROCESSING + post_center_limit_range = torch.tensor(post_process_cfg.POST_CENTER_LIMIT_RANGE).cuda().float() + + ret_dict = [{ + 'pred_boxes': [], + 'pred_scores': [], + 'pred_labels': [], + 'pred_ious': [], + } for k in range(batch_size)] + for idx, pred_dict in enumerate(pred_dicts): + if self.double_flip: + batch_hm, batch_center, batch_center_z, batch_dim, batch_rot_cos, batch_rot_sin, batch_vel, batch_iou, voxel_indices_ = \ + self.merge_double_flip(pred_dict, batch_size, voxel_indices.clone(), spatial_shape) + else: + batch_hm = pred_dict['hm'].sigmoid() + batch_center = pred_dict['center'] + batch_center_z = pred_dict['center_z'] + batch_dim = pred_dict['dim'].exp() + batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1) + batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1) + batch_iou = (pred_dict['iou'] + 1) * 0.5 if self.iou_branch else None + batch_vel = pred_dict['vel'] if 'vel' in self.separate_head_cfg.HEAD_ORDER else None + voxel_indices_ = voxel_indices + + final_pred_dicts = centernet_utils.decode_bbox_from_voxels_nuscenes( + batch_size=batch_size, indices=voxel_indices_, + obj=batch_hm, + rot_cos=batch_rot_cos, + rot_sin=batch_rot_sin, + center=batch_center, center_z=batch_center_z, + dim=batch_dim, vel=batch_vel, iou=batch_iou, + point_cloud_range=self.point_cloud_range, voxel_size=self.voxel_size, + feature_map_stride=self.feature_map_stride, + K=post_process_cfg.MAX_OBJ_PER_SAMPLE, + #circle_nms=(post_process_cfg.NMS_CONFIG.NMS_TYPE == 'circle_nms'), + score_thresh=post_process_cfg.SCORE_THRESH, + post_center_limit_range=post_center_limit_range + ) + + for k, final_dict in enumerate(final_pred_dicts): + final_dict['pred_labels'] = self.class_id_mapping_each_head[idx][final_dict['pred_labels'].long()] + if not self.iou_branch: + selected, selected_scores = model_nms_utils.class_agnostic_nms( + box_scores=final_dict['pred_scores'], box_preds=final_dict['pred_boxes'], + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=None + ) + + final_dict['pred_boxes'] = final_dict['pred_boxes'][selected] + final_dict['pred_scores'] = selected_scores + final_dict['pred_labels'] = final_dict['pred_labels'][selected] + + ret_dict[k]['pred_boxes'].append(final_dict['pred_boxes']) + 
ret_dict[k]['pred_scores'].append(final_dict['pred_scores']) + ret_dict[k]['pred_labels'].append(final_dict['pred_labels']) + ret_dict[k]['pred_ious'].append(final_dict['pred_ious']) + + for k in range(batch_size): + pred_boxes = torch.cat(ret_dict[k]['pred_boxes'], dim=0) + pred_scores = torch.cat(ret_dict[k]['pred_scores'], dim=0) + pred_labels = torch.cat(ret_dict[k]['pred_labels'], dim=0) + if self.iou_branch: + pred_ious = torch.cat(ret_dict[k]['pred_ious'], dim=0) + pred_boxes, pred_scores, pred_labels = self.rotate_class_specific_nms_iou(pred_boxes, pred_scores, pred_ious, pred_labels, self.rectifier, self.nms_configs) + + ret_dict[k]['pred_boxes'] = pred_boxes + ret_dict[k]['pred_scores'] = pred_scores + ret_dict[k]['pred_labels'] = pred_labels + 1 + + return ret_dict + + @staticmethod + def reorder_rois_for_refining(batch_size, pred_dicts): + num_max_rois = max([len(cur_dict['pred_boxes']) for cur_dict in pred_dicts]) + num_max_rois = max(1, num_max_rois) # at least one faked rois to avoid error + pred_boxes = pred_dicts[0]['pred_boxes'] + + rois = pred_boxes.new_zeros((batch_size, num_max_rois, pred_boxes.shape[-1])) + roi_scores = pred_boxes.new_zeros((batch_size, num_max_rois)) + roi_labels = pred_boxes.new_zeros((batch_size, num_max_rois)).long() + + for bs_idx in range(batch_size): + num_boxes = len(pred_dicts[bs_idx]['pred_boxes']) + + rois[bs_idx, :num_boxes, :] = pred_dicts[bs_idx]['pred_boxes'] + roi_scores[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_scores'] + roi_labels[bs_idx, :num_boxes] = pred_dicts[bs_idx]['pred_labels'] + return rois, roi_scores, roi_labels + + def _get_voxel_infos(self, x): + spatial_shape = x.spatial_shape + voxel_indices = x.indices + spatial_indices = [] + num_voxels = [] + batch_size = x.batch_size + batch_index = voxel_indices[:, 0] + + for bs_idx in range(batch_size): + batch_inds = batch_index==bs_idx + spatial_indices.append(voxel_indices[batch_inds][:, [2, 1]]) + num_voxels.append(batch_inds.sum()) + + return spatial_shape, batch_index, voxel_indices, spatial_indices, num_voxels + + def forward(self, data_dict): + x = data_dict['encoded_spconv_tensor'] + + spatial_shape, batch_index, voxel_indices, spatial_indices, num_voxels = self._get_voxel_infos(x) + self.forward_ret_dict['batch_index'] = batch_index + + pred_dicts = [] + for head in self.heads_list: + pred_dicts.append(head(x)) + + if self.training: + target_dict = self.assign_targets( + data_dict['gt_boxes'], num_voxels, spatial_indices, spatial_shape + ) + self.forward_ret_dict['target_dicts'] = target_dict + + self.forward_ret_dict['pred_dicts'] = pred_dicts + self.forward_ret_dict['voxel_indices'] = voxel_indices + + if not self.training or self.predict_boxes_when_training: + if self.double_flip: + data_dict['batch_size'] = data_dict['batch_size'] // 4 + pred_dicts = self.generate_predicted_boxes( + data_dict['batch_size'], + pred_dicts, voxel_indices, spatial_shape + ) + + if self.predict_boxes_when_training: + rois, roi_scores, roi_labels = self.reorder_rois_for_refining(data_dict['batch_size'], pred_dicts) + data_dict['rois'] = rois + data_dict['roi_scores'] = roi_scores + data_dict['roi_labels'] = roi_labels + data_dict['has_class_labels'] = True + else: + data_dict['final_box_dicts'] = pred_dicts + + return data_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/PartA2_net.py b/toolbox/openpcdet/pcdet/models/detectors/PartA2_net.py new file mode 100644 index 000000000..890b59471 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/PartA2_net.py @@ -0,0 
+1,31 @@ +from .detector3d_template import Detector3DTemplate + + +class PartA2Net(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + loss_rpn, tb_dict = self.dense_head.get_loss() + loss_point, tb_dict = self.point_head.get_loss(tb_dict) + loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict) + + loss = loss_rpn + loss_point + loss_rcnn + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/__init__.py b/toolbox/openpcdet/pcdet/models/detectors/__init__.py new file mode 100644 index 000000000..1af193078 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/__init__.py @@ -0,0 +1,45 @@ +from .detector3d_template import Detector3DTemplate +from .PartA2_net import PartA2Net +from .point_rcnn import PointRCNN +from .pointpillar import PointPillar +from .pv_rcnn import PVRCNN +from .second_net import SECONDNet +from .second_net_iou import SECONDNetIoU +from .caddn import CaDDN +from .voxel_rcnn import VoxelRCNN +from .centerpoint import CenterPoint +from .pv_rcnn_plusplus import PVRCNNPlusPlus +from .mppnet import MPPNet +from .mppnet_e2e import MPPNetE2E +from .pillarnet import PillarNet +from .voxelnext import VoxelNeXt +from .transfusion import TransFusion +from .bevfusion import BevFusion + +__all__ = { + 'Detector3DTemplate': Detector3DTemplate, + 'SECONDNet': SECONDNet, + 'PartA2Net': PartA2Net, + 'PVRCNN': PVRCNN, + 'PointPillar': PointPillar, + 'PointRCNN': PointRCNN, + 'SECONDNetIoU': SECONDNetIoU, + 'CaDDN': CaDDN, + 'VoxelRCNN': VoxelRCNN, + 'CenterPoint': CenterPoint, + 'PillarNet': PillarNet, + 'PVRCNNPlusPlus': PVRCNNPlusPlus, + 'MPPNet': MPPNet, + 'MPPNetE2E': MPPNetE2E, + 'VoxelNeXt': VoxelNeXt, + 'TransFusion': TransFusion, + 'BevFusion': BevFusion, +} + + +def build_detector(model_cfg, num_class, dataset): + model = __all__[model_cfg.NAME]( + model_cfg=model_cfg, num_class=num_class, dataset=dataset + ) + + return model diff --git a/toolbox/openpcdet/pcdet/models/detectors/bevfusion.py b/toolbox/openpcdet/pcdet/models/detectors/bevfusion.py new file mode 100644 index 000000000..bb55fc094 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/bevfusion.py @@ -0,0 +1,101 @@ +from .detector3d_template import Detector3DTemplate +from .. 
import backbones_image, view_transforms +from ..backbones_image import img_neck +from ..backbones_2d import fuser + +class BevFusion(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_topology = [ + 'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe', + 'image_backbone','neck','vtransform','fuser', + 'backbone_2d', 'dense_head', 'point_head', 'roi_head' + ] + self.module_list = self.build_networks() + + def build_neck(self,model_info_dict): + if self.model_cfg.get('NECK', None) is None: + return None, model_info_dict + neck_module = img_neck.__all__[self.model_cfg.NECK.NAME]( + model_cfg=self.model_cfg.NECK + ) + model_info_dict['module_list'].append(neck_module) + + return neck_module, model_info_dict + + def build_vtransform(self,model_info_dict): + if self.model_cfg.get('VTRANSFORM', None) is None: + return None, model_info_dict + + vtransform_module = view_transforms.__all__[self.model_cfg.VTRANSFORM.NAME]( + model_cfg=self.model_cfg.VTRANSFORM + ) + model_info_dict['module_list'].append(vtransform_module) + + return vtransform_module, model_info_dict + + def build_image_backbone(self, model_info_dict): + if self.model_cfg.get('IMAGE_BACKBONE', None) is None: + return None, model_info_dict + image_backbone_module = backbones_image.__all__[self.model_cfg.IMAGE_BACKBONE.NAME]( + model_cfg=self.model_cfg.IMAGE_BACKBONE + ) + image_backbone_module.init_weights() + model_info_dict['module_list'].append(image_backbone_module) + + return image_backbone_module, model_info_dict + + def build_fuser(self, model_info_dict): + if self.model_cfg.get('FUSER', None) is None: + return None, model_info_dict + + fuser_module = fuser.__all__[self.model_cfg.FUSER.NAME]( + model_cfg=self.model_cfg.FUSER + ) + model_info_dict['module_list'].append(fuser_module) + model_info_dict['num_bev_features'] = self.model_cfg.FUSER.OUT_CHANNEL + return fuser_module, model_info_dict + + def forward(self, batch_dict): + + for i,cur_module in enumerate(self.module_list): + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss(batch_dict) + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self,batch_dict): + disp_dict = {} + + loss_trans, tb_dict = batch_dict['loss'],batch_dict['tb_dict'] + tb_dict = { + 'loss_trans': loss_trans.item(), + **tb_dict + } + + loss = loss_trans + return loss, tb_dict, disp_dict + + def post_processing(self, batch_dict): + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + final_pred_dict = batch_dict['final_box_dicts'] + recall_dict = {} + for index in range(batch_size): + pred_boxes = final_pred_dict[index]['pred_boxes'] + + recall_dict = self.generate_recall_record( + box_preds=pred_boxes, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + return final_pred_dict, recall_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/caddn.py b/toolbox/openpcdet/pcdet/models/detectors/caddn.py new file mode 100644 index 000000000..32f56a796 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/caddn.py @@ -0,0 +1,38 @@ +from .detector3d_template import Detector3DTemplate + + +class CaDDN(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + 
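# Detector3DTemplate.build_networks() instantiates each module listed in module_topology from the model config + 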
super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + + loss_rpn, tb_dict_rpn = self.dense_head.get_loss() + loss_depth, tb_dict_depth = self.vfe.get_loss() + + tb_dict = { + 'loss_rpn': loss_rpn.item(), + 'loss_depth': loss_depth.item(), + **tb_dict_rpn, + **tb_dict_depth + } + + loss = loss_rpn + loss_depth + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/centerpoint.py b/toolbox/openpcdet/pcdet/models/detectors/centerpoint.py new file mode 100644 index 000000000..a5bc01163 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/centerpoint.py @@ -0,0 +1,50 @@ +from .detector3d_template import Detector3DTemplate + + +class CenterPoint(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + + loss_rpn, tb_dict = self.dense_head.get_loss() + tb_dict = { + 'loss_rpn': loss_rpn.item(), + **tb_dict + } + + loss = loss_rpn + return loss, tb_dict, disp_dict + + def post_processing(self, batch_dict): + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + final_pred_dict = batch_dict['final_box_dicts'] + recall_dict = {} + for index in range(batch_size): + pred_boxes = final_pred_dict[index]['pred_boxes'] + + recall_dict = self.generate_recall_record( + box_preds=pred_boxes, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + return final_pred_dict, recall_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/detector3d_template.py b/toolbox/openpcdet/pcdet/models/detectors/detector3d_template.py new file mode 100644 index 000000000..91e44bd46 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/detector3d_template.py @@ -0,0 +1,415 @@ +import os + +import torch +import torch.nn as nn +import numpy as np +from ...ops.iou3d_nms import iou3d_nms_utils +from ...utils.spconv_utils import find_all_spconv_keys +from .. 
import backbones_2d, backbones_3d, dense_heads, roi_heads +from ..backbones_2d import map_to_bev +from ..backbones_3d import pfe, vfe +from ..model_utils import model_nms_utils + + +class Detector3DTemplate(nn.Module): + def __init__(self, model_cfg, num_class, dataset): + super().__init__() + self.model_cfg = model_cfg + self.num_class = num_class + self.dataset = dataset + self.class_names = dataset.class_names + self.register_buffer('global_step', torch.LongTensor(1).zero_()) + + self.module_topology = [ + 'vfe', 'backbone_3d', 'map_to_bev_module', 'pfe', + 'backbone_2d', 'dense_head', 'point_head', 'roi_head' + ] + + @property + def mode(self): + return 'TRAIN' if self.training else 'TEST' + + def update_global_step(self): + self.global_step += 1 + + def build_networks(self): + model_info_dict = { + 'module_list': [], + 'num_rawpoint_features': self.dataset.point_feature_encoder.num_point_features, + 'num_point_features': self.dataset.point_feature_encoder.num_point_features, + 'grid_size': self.dataset.grid_size, + 'point_cloud_range': self.dataset.point_cloud_range, + 'voxel_size': self.dataset.voxel_size, + 'depth_downsample_factor': self.dataset.depth_downsample_factor + } + for module_name in self.module_topology: + module, model_info_dict = getattr(self, 'build_%s' % module_name)( + model_info_dict=model_info_dict + ) + self.add_module(module_name, module) + return model_info_dict['module_list'] + + def build_vfe(self, model_info_dict): + if self.model_cfg.get('VFE', None) is None: + return None, model_info_dict + + vfe_module = vfe.__all__[self.model_cfg.VFE.NAME]( + model_cfg=self.model_cfg.VFE, + num_point_features=model_info_dict['num_rawpoint_features'], + point_cloud_range=model_info_dict['point_cloud_range'], + voxel_size=model_info_dict['voxel_size'], + grid_size=model_info_dict['grid_size'], + depth_downsample_factor=model_info_dict['depth_downsample_factor'] + ) + model_info_dict['num_point_features'] = vfe_module.get_output_feature_dim() + model_info_dict['module_list'].append(vfe_module) + return vfe_module, model_info_dict + + def build_backbone_3d(self, model_info_dict): + if self.model_cfg.get('BACKBONE_3D', None) is None: + return None, model_info_dict + + backbone_3d_module = backbones_3d.__all__[self.model_cfg.BACKBONE_3D.NAME]( + model_cfg=self.model_cfg.BACKBONE_3D, + input_channels=model_info_dict['num_point_features'], + grid_size=model_info_dict['grid_size'], + voxel_size=model_info_dict['voxel_size'], + point_cloud_range=model_info_dict['point_cloud_range'] + ) + model_info_dict['module_list'].append(backbone_3d_module) + model_info_dict['num_point_features'] = backbone_3d_module.num_point_features + model_info_dict['backbone_channels'] = backbone_3d_module.backbone_channels \ + if hasattr(backbone_3d_module, 'backbone_channels') else None + return backbone_3d_module, model_info_dict + + def build_map_to_bev_module(self, model_info_dict): + if self.model_cfg.get('MAP_TO_BEV', None) is None: + return None, model_info_dict + + map_to_bev_module = map_to_bev.__all__[self.model_cfg.MAP_TO_BEV.NAME]( + model_cfg=self.model_cfg.MAP_TO_BEV, + grid_size=model_info_dict['grid_size'] + ) + model_info_dict['module_list'].append(map_to_bev_module) + model_info_dict['num_bev_features'] = map_to_bev_module.num_bev_features + return map_to_bev_module, model_info_dict + + def build_backbone_2d(self, model_info_dict): + if self.model_cfg.get('BACKBONE_2D', None) is None: + return None, model_info_dict + + backbone_2d_module = 
backbones_2d.__all__[self.model_cfg.BACKBONE_2D.NAME]( + model_cfg=self.model_cfg.BACKBONE_2D, + input_channels=model_info_dict.get('num_bev_features', None) + ) + model_info_dict['module_list'].append(backbone_2d_module) + model_info_dict['num_bev_features'] = backbone_2d_module.num_bev_features + return backbone_2d_module, model_info_dict + + def build_pfe(self, model_info_dict): + if self.model_cfg.get('PFE', None) is None: + return None, model_info_dict + + pfe_module = pfe.__all__[self.model_cfg.PFE.NAME]( + model_cfg=self.model_cfg.PFE, + voxel_size=model_info_dict['voxel_size'], + point_cloud_range=model_info_dict['point_cloud_range'], + num_bev_features=model_info_dict['num_bev_features'], + num_rawpoint_features=model_info_dict['num_rawpoint_features'] + ) + model_info_dict['module_list'].append(pfe_module) + model_info_dict['num_point_features'] = pfe_module.num_point_features + model_info_dict['num_point_features_before_fusion'] = pfe_module.num_point_features_before_fusion + return pfe_module, model_info_dict + + def build_dense_head(self, model_info_dict): + if self.model_cfg.get('DENSE_HEAD', None) is None: + return None, model_info_dict + dense_head_module = dense_heads.__all__[self.model_cfg.DENSE_HEAD.NAME]( + model_cfg=self.model_cfg.DENSE_HEAD, + input_channels=model_info_dict['num_bev_features'] if 'num_bev_features' in model_info_dict else self.model_cfg.DENSE_HEAD.INPUT_FEATURES, + num_class=self.num_class if not self.model_cfg.DENSE_HEAD.CLASS_AGNOSTIC else 1, + class_names=self.class_names, + grid_size=model_info_dict['grid_size'], + point_cloud_range=model_info_dict['point_cloud_range'], + predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False), + voxel_size=model_info_dict.get('voxel_size', False) + ) + model_info_dict['module_list'].append(dense_head_module) + return dense_head_module, model_info_dict + + def build_point_head(self, model_info_dict): + if self.model_cfg.get('POINT_HEAD', None) is None: + return None, model_info_dict + + if self.model_cfg.POINT_HEAD.get('USE_POINT_FEATURES_BEFORE_FUSION', False): + num_point_features = model_info_dict['num_point_features_before_fusion'] + else: + num_point_features = model_info_dict['num_point_features'] + + point_head_module = dense_heads.__all__[self.model_cfg.POINT_HEAD.NAME]( + model_cfg=self.model_cfg.POINT_HEAD, + input_channels=num_point_features, + num_class=self.num_class if not self.model_cfg.POINT_HEAD.CLASS_AGNOSTIC else 1, + predict_boxes_when_training=self.model_cfg.get('ROI_HEAD', False) + ) + + model_info_dict['module_list'].append(point_head_module) + return point_head_module, model_info_dict + + def build_roi_head(self, model_info_dict): + if self.model_cfg.get('ROI_HEAD', None) is None: + return None, model_info_dict + point_head_module = roi_heads.__all__[self.model_cfg.ROI_HEAD.NAME]( + model_cfg=self.model_cfg.ROI_HEAD, + input_channels=model_info_dict['num_point_features'], + backbone_channels= model_info_dict.get('backbone_channels', None), + point_cloud_range=model_info_dict['point_cloud_range'], + voxel_size=model_info_dict['voxel_size'], + num_class=self.num_class if not self.model_cfg.ROI_HEAD.CLASS_AGNOSTIC else 1, + ) + + model_info_dict['module_list'].append(point_head_module) + return point_head_module, model_info_dict + + def forward(self, **kwargs): + raise NotImplementedError + + def post_processing(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1) + or [(B, num_boxes, 
num_class1), (B, num_boxes, num_class2) ...] + multihead_label_mapping: [(num_class1), (num_class2), ...] + batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C) + cls_preds_normalized: indicate whether batch_cls_preds is normalized + batch_index: optional (N1+N2+...) + has_class_labels: True/False + roi_labels: (B, num_rois) 1 .. num_classes + batch_pred_labels: (B, num_boxes, 1) + Returns: + + """ + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + recall_dict = {} + pred_dicts = [] + for index in range(batch_size): + if batch_dict.get('batch_index', None) is not None: + assert batch_dict['batch_box_preds'].shape.__len__() == 2 + batch_mask = (batch_dict['batch_index'] == index) + else: + assert batch_dict['batch_box_preds'].shape.__len__() == 3 + batch_mask = index + + box_preds = batch_dict['batch_box_preds'][batch_mask] + src_box_preds = box_preds + + if not isinstance(batch_dict['batch_cls_preds'], list): + cls_preds = batch_dict['batch_cls_preds'][batch_mask] + + src_cls_preds = cls_preds + assert cls_preds.shape[1] in [1, self.num_class] + + if not batch_dict['cls_preds_normalized']: + cls_preds = torch.sigmoid(cls_preds) + else: + cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']] + src_cls_preds = cls_preds + if not batch_dict['cls_preds_normalized']: + cls_preds = [torch.sigmoid(x) for x in cls_preds] + + if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS: + if not isinstance(cls_preds, list): + cls_preds = [cls_preds] + multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)] + else: + multihead_label_mapping = batch_dict['multihead_label_mapping'] + + cur_start_idx = 0 + pred_scores, pred_labels, pred_boxes = [], [], [] + for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping): + assert cur_cls_preds.shape[1] == len(cur_label_mapping) + cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]] + cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms( + cls_scores=cur_cls_preds, box_preds=cur_box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + cur_pred_labels = cur_label_mapping[cur_pred_labels] + pred_scores.append(cur_pred_scores) + pred_labels.append(cur_pred_labels) + pred_boxes.append(cur_pred_boxes) + cur_start_idx += cur_cls_preds.shape[0] + + final_scores = torch.cat(pred_scores, dim=0) + final_labels = torch.cat(pred_labels, dim=0) + final_boxes = torch.cat(pred_boxes, dim=0) + else: + cls_preds, label_preds = torch.max(cls_preds, dim=-1) + if batch_dict.get('has_class_labels', False): + label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels' + label_preds = batch_dict[label_key][index] + else: + label_preds = label_preds + 1 + selected, selected_scores = model_nms_utils.class_agnostic_nms( + box_scores=cls_preds, box_preds=box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + + if post_process_cfg.OUTPUT_RAW_SCORE: + max_cls_preds, _ = torch.max(src_cls_preds, dim=-1) + selected_scores = max_cls_preds[selected] + + final_scores = selected_scores + final_labels = label_preds[selected] + final_boxes = box_preds[selected] + + recall_dict = self.generate_recall_record( + box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + record_dict = { + 
'pred_boxes': final_boxes, + 'pred_scores': final_scores, + 'pred_labels': final_labels + } + pred_dicts.append(record_dict) + + return pred_dicts, recall_dict + + @staticmethod + def generate_recall_record(box_preds, recall_dict, batch_index, data_dict=None, thresh_list=None): + if 'gt_boxes' not in data_dict: + return recall_dict + + rois = data_dict['rois'][batch_index] if 'rois' in data_dict else None + gt_boxes = data_dict['gt_boxes'][batch_index] + + if recall_dict.__len__() == 0: + recall_dict = {'gt': 0} + for cur_thresh in thresh_list: + recall_dict['roi_%s' % (str(cur_thresh))] = 0 + recall_dict['rcnn_%s' % (str(cur_thresh))] = 0 + + cur_gt = gt_boxes + k = cur_gt.__len__() - 1 + while k >= 0 and cur_gt[k].sum() == 0: + k -= 1 + cur_gt = cur_gt[:k + 1] + + if cur_gt.shape[0] > 0: + if box_preds.shape[0] > 0: + iou3d_rcnn = iou3d_nms_utils.boxes_iou3d_gpu(box_preds[:, 0:7], cur_gt[:, 0:7]) + else: + iou3d_rcnn = torch.zeros((0, cur_gt.shape[0])) + + if rois is not None: + iou3d_roi = iou3d_nms_utils.boxes_iou3d_gpu(rois[:, 0:7], cur_gt[:, 0:7]) + + for cur_thresh in thresh_list: + if iou3d_rcnn.shape[0] == 0: + recall_dict['rcnn_%s' % str(cur_thresh)] += 0 + else: + rcnn_recalled = (iou3d_rcnn.max(dim=0)[0] > cur_thresh).sum().item() + recall_dict['rcnn_%s' % str(cur_thresh)] += rcnn_recalled + if rois is not None: + roi_recalled = (iou3d_roi.max(dim=0)[0] > cur_thresh).sum().item() + recall_dict['roi_%s' % str(cur_thresh)] += roi_recalled + + recall_dict['gt'] += cur_gt.shape[0] + else: + gt_iou = box_preds.new_zeros(box_preds.shape[0]) + return recall_dict + + def _load_state_dict(self, model_state_disk, *, strict=True): + state_dict = self.state_dict() # local cache of state_dict + + spconv_keys = find_all_spconv_keys(self) + + update_model_state = {} + for key, val in model_state_disk.items(): + if key in spconv_keys and key in state_dict and state_dict[key].shape != val.shape: + # with different spconv versions, we need to adapt weight shapes for spconv blocks + # adapt spconv weights from version 1.x to version 2.x if you used weights from spconv 1.x + + val_native = val.transpose(-1, -2) # (k1, k2, k3, c_in, c_out) to (k1, k2, k3, c_out, c_in) + if val_native.shape == state_dict[key].shape: + val = val_native.contiguous() + else: + assert val.shape.__len__() == 5, 'currently only spconv 3D is supported' + val_implicit = val.permute(4, 0, 1, 2, 3) # (k1, k2, k3, c_in, c_out) to (c_out, k1, k2, k3, c_in) + if val_implicit.shape == state_dict[key].shape: + val = val_implicit.contiguous() + + if key in state_dict and state_dict[key].shape == val.shape: + update_model_state[key] = val + # logger.info('Update weight %s: %s' % (key, str(val.shape))) + + if strict: + self.load_state_dict(update_model_state) + else: + state_dict.update(update_model_state) + self.load_state_dict(state_dict) + return state_dict, update_model_state + + def load_params_from_file(self, filename, logger, to_cpu=False, pre_trained_path=None): + if not os.path.isfile(filename): + raise FileNotFoundError + + logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU')) + loc_type = torch.device('cpu') if to_cpu else None + checkpoint = torch.load(filename, map_location=loc_type) + model_state_disk = checkpoint['model_state'] + if not pre_trained_path is None: + pretrain_checkpoint = torch.load(pre_trained_path, map_location=loc_type) + pretrain_model_state_disk = pretrain_checkpoint['model_state'] + model_state_disk.update(pretrain_model_state_disk) + + version = 
checkpoint.get("version", None) + if version is not None: + logger.info('==> Checkpoint trained from version: %s' % version) + + state_dict, update_model_state = self._load_state_dict(model_state_disk, strict=False) + + for key in state_dict: + if key not in update_model_state: + logger.info('Not updated weight %s: %s' % (key, str(state_dict[key].shape))) + + logger.info('==> Done (loaded %d/%d)' % (len(update_model_state), len(state_dict))) + + def load_params_with_optimizer(self, filename, to_cpu=False, optimizer=None, logger=None): + if not os.path.isfile(filename): + raise FileNotFoundError + + logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU')) + loc_type = torch.device('cpu') if to_cpu else None + checkpoint = torch.load(filename, map_location=loc_type) + epoch = checkpoint.get('epoch', -1) + it = checkpoint.get('it', 0.0) + + self._load_state_dict(checkpoint['model_state'], strict=True) + + if optimizer is not None: + if 'optimizer_state' in checkpoint and checkpoint['optimizer_state'] is not None: + logger.info('==> Loading optimizer parameters from checkpoint %s to %s' + % (filename, 'CPU' if to_cpu else 'GPU')) + optimizer.load_state_dict(checkpoint['optimizer_state']) + else: + assert filename[-4] == '.', filename + src_file, ext = filename[:-4], filename[-3:] + optimizer_filename = '%s_optim.%s' % (src_file, ext) + if os.path.exists(optimizer_filename): + optimizer_ckpt = torch.load(optimizer_filename, map_location=loc_type) + optimizer.load_state_dict(optimizer_ckpt['optimizer_state']) + + if 'version' in checkpoint: + print('==> Checkpoint trained from version: %s' % checkpoint['version']) + logger.info('==> Done') + + return it, epoch diff --git a/toolbox/openpcdet/pcdet/models/detectors/mppnet.py b/toolbox/openpcdet/pcdet/models/detectors/mppnet.py new file mode 100644 index 000000000..10eeb6873 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/mppnet.py @@ -0,0 +1,181 @@ +import torch +from .detector3d_template import Detector3DTemplate +from pcdet.ops.iou3d_nms import iou3d_nms_utils +import os +import numpy as np +import time +from ...utils import common_utils +from ..model_utils import model_nms_utils +from pcdet.datasets.augmentor import augmentor_utils, database_sampler + + +class MPPNet(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + batch_dict['proposals_list'] = batch_dict['roi_boxes'] + for cur_module in self.module_list[:]: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + + return ret_dict, tb_dict, disp_dict + else: + + pred_dicts, recall_dicts = self.post_processing(batch_dict) + + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + tb_dict ={} + loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict) + loss = loss_rcnn + + return loss, tb_dict, disp_dict + + def post_processing(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1) + or [(B, num_boxes, num_class1), (B, num_boxes, num_class2) ...] + multihead_label_mapping: [(num_class1), (num_class2), ...] 
+ batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C) + cls_preds_normalized: indicate whether batch_cls_preds is normalized + batch_index: optional (N1+N2+...) + has_class_labels: True/False + roi_labels: (B, num_rois) 1 .. num_classes + batch_pred_labels: (B, num_boxes, 1) + Returns: + + """ + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + recall_dict = {} + pred_dicts = [] + for index in range(batch_size): + if batch_dict.get('batch_index', None) is not None: + assert batch_dict['batch_box_preds'].shape.__len__() == 2 + batch_mask = (batch_dict['batch_index'] == index) + else: + assert batch_dict['batch_box_preds'].shape.__len__() == 3 + batch_mask = index + + box_preds = batch_dict['batch_box_preds'][batch_mask] + src_box_preds = box_preds + if not isinstance(batch_dict['batch_cls_preds'], list): + cls_preds = batch_dict['batch_cls_preds'][batch_mask] + + src_cls_preds = cls_preds + assert cls_preds.shape[1] in [1, self.num_class] + + if not batch_dict['cls_preds_normalized']: + cls_preds = torch.sigmoid(cls_preds) + else: + cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']] + src_cls_preds = cls_preds + if not batch_dict['cls_preds_normalized']: + cls_preds = [torch.sigmoid(x) for x in cls_preds] + + if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS: + if not isinstance(cls_preds, list): + cls_preds = [cls_preds] + multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)] + else: + multihead_label_mapping = batch_dict['multihead_label_mapping'] + + cur_start_idx = 0 + pred_scores, pred_labels, pred_boxes = [], [], [] + for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping): + assert cur_cls_preds.shape[1] == len(cur_label_mapping) + cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]] + cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms( + cls_scores=cur_cls_preds, box_preds=cur_box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + cur_pred_labels = cur_label_mapping[cur_pred_labels] + pred_scores.append(cur_pred_scores) + pred_labels.append(cur_pred_labels) + pred_boxes.append(cur_pred_boxes) + cur_start_idx += cur_cls_preds.shape[0] + + final_scores = torch.cat(pred_scores, dim=0) + final_labels = torch.cat(pred_labels, dim=0) + final_boxes = torch.cat(pred_boxes, dim=0) + else: + try: + cls_preds, label_preds = torch.max(cls_preds, dim=-1) + except: + record_dict = { + 'pred_boxes': torch.tensor([]), + 'pred_scores': torch.tensor([]), + 'pred_labels': torch.tensor([]) + } + pred_dicts.append(record_dict) + continue + + if batch_dict.get('has_class_labels', False): + label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels' + label_preds = batch_dict[label_key][index] + else: + label_preds = label_preds + 1 + + selected, selected_scores = model_nms_utils.class_agnostic_nms( + box_scores=cls_preds, box_preds=box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + + if post_process_cfg.OUTPUT_RAW_SCORE: + max_cls_preds, _ = torch.max(src_cls_preds, dim=-1) + selected_scores = max_cls_preds[selected] + + final_scores = selected_scores + final_labels = label_preds[selected] + final_boxes = box_preds[selected] + + ######### Car DONOT Using NMS ###### + if post_process_cfg.get('NOT_APPLY_NMS_FOR_VEL',False): + + pedcyc_mask = final_labels !=1 + final_scores_pedcyc = final_scores[pedcyc_mask] 
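+                # With NOT_APPLY_NMS_FOR_VEL set, only non-vehicle detections
+                # (pedestrian/cyclist, label != 1) keep the NMS-filtered results
+                # selected above; vehicle boxes are re-gathered below from the raw
+                # predictions by score threshold alone, skipping NMS entirely.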
+ final_labels_pedcyc = final_labels[pedcyc_mask] + final_boxes_pedcyc = final_boxes[pedcyc_mask] + + car_mask = (label_preds==1) & (cls_preds > post_process_cfg.SCORE_THRESH) + final_scores_car = cls_preds[car_mask] + final_labels_car = label_preds[car_mask] + final_boxes_car = box_preds[car_mask] + + final_scores = torch.cat([final_scores_car,final_scores_pedcyc],0) + final_labels = torch.cat([final_labels_car,final_labels_pedcyc],0) + final_boxes = torch.cat([final_boxes_car,final_boxes_pedcyc],0) + + ######### Car DONOT Using NMS ###### + + recall_dict = self.generate_recall_record( + box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + + record_dict = { + 'pred_boxes': final_boxes[:,:7], + 'pred_scores': final_scores, + 'pred_labels': final_labels + } + pred_dicts.append(record_dict) + + return pred_dicts, recall_dict + diff --git a/toolbox/openpcdet/pcdet/models/detectors/mppnet_e2e.py b/toolbox/openpcdet/pcdet/models/detectors/mppnet_e2e.py new file mode 100644 index 000000000..7561c9b9a --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/mppnet_e2e.py @@ -0,0 +1,222 @@ +import torch +import os +import numpy as np +import copy +from ...utils import common_utils +from ..model_utils import model_nms_utils +from .detector3d_template import Detector3DTemplate +from pcdet.ops.iou3d_nms import iou3d_nms_utils +from pcdet.datasets.augmentor import augmentor_utils, database_sampler + + +class MPPNetE2E(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + self.module_topology = [ + 'vfe', 'backbone_3d', 'map_to_bev_module', + 'backbone_2d', 'dense_head','roi_head' + ] + + self.num_frames = self.model_cfg.ROI_HEAD.Transformer.num_frames + + def reset_memorybank(self): + self.memory_rois = None + self.memory_labels = None + self.memory_scores = None + self.memory_feature = None + + def forward(self, batch_dict): + + if batch_dict['sample_idx'][0] ==0: + self.reset_memorybank() + batch_dict['memory_bank'] = {} + else: + batch_dict['memory_bank'] = {'feature_bank':self.memory_feature} + + if self.num_frames ==16: + batch_dict['points_backup'] = batch_dict['points'].clone() + time_mask = batch_dict['points'][:,-1] < 0.31 # centerpoint RPN only use 4frames + batch_dict['points'] = batch_dict['points'][time_mask] + + for idx, cur_module in enumerate(self.module_list): + batch_dict = cur_module(batch_dict) + if self.module_topology[idx] == 'dense_head': + + if self.memory_rois is None: + self.memory_rois = [batch_dict['rois']]*self.num_frames + self.memory_labels = [batch_dict['roi_labels'][:,:,None]]*self.num_frames + self.memory_scores = [batch_dict['roi_scores'][:,:,None]]*self.num_frames + else: + self.memory_rois.pop() + self.memory_rois.insert(0,batch_dict['rois']) + self.memory_labels.pop() + self.memory_labels.insert(0,batch_dict['roi_labels'][:,:,None]) + self.memory_scores.pop() + self.memory_scores.insert(0,batch_dict['roi_scores'][:,:,None]) + + + batch_dict['memory_bank'].update({'rois': self.memory_rois, + 'roi_labels': self.memory_labels, + 'roi_scores': self.memory_scores}) + + + if self.module_topology[idx] == 'roi_head': + if self.memory_feature is None: + self.memory_feature = [batch_dict['geometory_feature_memory'][:,:64]]*self.num_frames + + else: + self.memory_feature.pop() + 
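+                        # FIFO update: the oldest frame's features were just popped;
+                        # the newest are pushed at index 0 below, in step with the
+                        # roi/label/score banks refreshed after the dense head.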
self.memory_feature.insert(0,batch_dict['geometory_feature_memory'][:,:64]) + + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + + return pred_dicts, recall_dicts + + + def get_training_loss(self): + disp_dict = {} + + loss_rpn, tb_dict = self.dense_head.get_loss() + tb_dict = { + 'loss_rpn': loss_rpn.item(), + **tb_dict + } + + loss = loss_rpn + return loss, tb_dict, disp_dict + + + def post_processing(self, batch_dict): + + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + recall_dict = {} + pred_dicts = [] + for index in range(batch_size): + if batch_dict.get('batch_index', None) is not None: + assert batch_dict['batch_box_preds'].shape.__len__() == 2 + batch_mask = (batch_dict['batch_index'] == index) + else: + assert batch_dict['batch_box_preds'].shape.__len__() == 3 + batch_mask = index + + box_preds = batch_dict['batch_box_preds'][batch_mask] + src_box_preds = box_preds + if not isinstance(batch_dict['batch_cls_preds'], list): + cls_preds = batch_dict['batch_cls_preds'][batch_mask] + + src_cls_preds = cls_preds + assert cls_preds.shape[1] in [1, self.num_class] + + if not batch_dict['cls_preds_normalized']: + cls_preds = torch.sigmoid(cls_preds) + else: + cls_preds = [x[batch_mask] for x in batch_dict['batch_cls_preds']] + src_cls_preds = cls_preds + if not batch_dict['cls_preds_normalized']: + cls_preds = [torch.sigmoid(x) for x in cls_preds] + + if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS: + if not isinstance(cls_preds, list): + cls_preds = [cls_preds] + multihead_label_mapping = [torch.arange(1, self.num_class, device=cls_preds[0].device)] + else: + multihead_label_mapping = batch_dict['multihead_label_mapping'] + + cur_start_idx = 0 + pred_scores, pred_labels, pred_boxes = [], [], [] + for cur_cls_preds, cur_label_mapping in zip(cls_preds, multihead_label_mapping): + assert cur_cls_preds.shape[1] == len(cur_label_mapping) + cur_box_preds = box_preds[cur_start_idx: cur_start_idx + cur_cls_preds.shape[0]] + cur_pred_scores, cur_pred_labels, cur_pred_boxes = model_nms_utils.multi_classes_nms( + cls_scores=cur_cls_preds, box_preds=cur_box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + cur_pred_labels = cur_label_mapping[cur_pred_labels] + pred_scores.append(cur_pred_scores) + pred_labels.append(cur_pred_labels) + pred_boxes.append(cur_pred_boxes) + cur_start_idx += cur_cls_preds.shape[0] + + final_scores = torch.cat(pred_scores, dim=0) + final_labels = torch.cat(pred_labels, dim=0) + final_boxes = torch.cat(pred_boxes, dim=0) + else: + try: + cls_preds, label_preds = torch.max(cls_preds, dim=-1) + except: + record_dict = { + 'pred_boxes': torch.tensor([]), + 'pred_scores': torch.tensor([]), + 'pred_labels': torch.tensor([]) + } + pred_dicts.append(record_dict) + continue + + if batch_dict.get('has_class_labels', False): + label_key = 'roi_labels' if 'roi_labels' in batch_dict else 'batch_pred_labels' + label_preds = batch_dict[label_key][index] + else: + label_preds = label_preds + 1 + + selected, selected_scores = model_nms_utils.class_agnostic_nms( + box_scores=cls_preds, box_preds=box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + + if post_process_cfg.OUTPUT_RAW_SCORE: + max_cls_preds, _ = torch.max(src_cls_preds, dim=-1) + selected_scores = 
max_cls_preds[selected] + + final_scores = selected_scores + final_labels = label_preds[selected] + final_boxes = box_preds[selected] + + ######### Car DONOT Using NMS ###### + if post_process_cfg.get('NOT_APPLY_NMS_FOR_VEL',False): + + pedcyc_mask = final_labels !=1 + final_scores_pedcyc = final_scores[pedcyc_mask] + final_labels_pedcyc = final_labels[pedcyc_mask] + final_boxes_pedcyc = final_boxes[pedcyc_mask] + + car_mask = (label_preds==1) & (cls_preds > post_process_cfg.SCORE_THRESH) + final_scores_car = cls_preds[car_mask] + final_labels_car = label_preds[car_mask] + final_boxes_car = box_preds[car_mask] + + final_scores = torch.cat([final_scores_car,final_scores_pedcyc],0) + final_labels = torch.cat([final_labels_car,final_labels_pedcyc],0) + final_boxes = torch.cat([final_boxes_car,final_boxes_pedcyc],0) + + ######### Car DONOT Using NMS ###### + + recall_dict = self.generate_recall_record( + box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + + record_dict = { + 'pred_boxes': final_boxes[:,:7], + 'pred_scores': final_scores, + 'pred_labels': final_labels + } + pred_dicts.append(record_dict) + + return pred_dicts, recall_dict + diff --git a/toolbox/openpcdet/pcdet/models/detectors/pillarnet.py b/toolbox/openpcdet/pcdet/models/detectors/pillarnet.py new file mode 100644 index 000000000..965a44163 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/pillarnet.py @@ -0,0 +1,50 @@ +from .detector3d_template import Detector3DTemplate + + +class PillarNet(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + + loss_rpn, tb_dict = self.dense_head.get_loss() + tb_dict = { + 'loss_rpn': loss_rpn.item(), + **tb_dict + } + + loss = loss_rpn + return loss, tb_dict, disp_dict + + def post_processing(self, batch_dict): + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + final_pred_dict = batch_dict['final_box_dicts'] + recall_dict = {} + for index in range(batch_size): + pred_boxes = final_pred_dict[index]['pred_boxes'] + + recall_dict = self.generate_recall_record( + box_preds=pred_boxes, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + return final_pred_dict, recall_dict \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/detectors/point_rcnn.py b/toolbox/openpcdet/pcdet/models/detectors/point_rcnn.py new file mode 100644 index 000000000..5d26224fd --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/point_rcnn.py @@ -0,0 +1,30 @@ +from .detector3d_template import Detector3DTemplate + + +class PointRCNN(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in 
self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + loss_point, tb_dict = self.point_head.get_loss() + loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict) + + loss = loss_point + loss_rcnn + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/pointpillar.py b/toolbox/openpcdet/pcdet/models/detectors/pointpillar.py new file mode 100644 index 000000000..e21f8261a --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/pointpillar.py @@ -0,0 +1,34 @@ +from .detector3d_template import Detector3DTemplate + + +class PointPillar(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + + loss_rpn, tb_dict = self.dense_head.get_loss() + tb_dict = { + 'loss_rpn': loss_rpn.item(), + **tb_dict + } + + loss = loss_rpn + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/pv_rcnn.py b/toolbox/openpcdet/pcdet/models/detectors/pv_rcnn.py new file mode 100644 index 000000000..4808513a2 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/pv_rcnn.py @@ -0,0 +1,36 @@ +from .detector3d_template import Detector3DTemplate + + +class PVRCNN(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + loss_rpn, tb_dict = self.dense_head.get_loss() + loss_point, tb_dict = self.point_head.get_loss(tb_dict) + loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict) + + loss = loss_rpn + loss_point + loss_rcnn + + if hasattr(self.backbone_3d, 'get_loss'): + loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict) + loss += loss_backbone3d + + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/pv_rcnn_plusplus.py b/toolbox/openpcdet/pcdet/models/detectors/pv_rcnn_plusplus.py new file mode 100644 index 000000000..2c64e6782 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/pv_rcnn_plusplus.py @@ -0,0 +1,53 @@ +from .detector3d_template import Detector3DTemplate + + +class PVRCNNPlusPlus(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, 
batch_dict): + batch_dict = self.vfe(batch_dict) + batch_dict = self.backbone_3d(batch_dict) + batch_dict = self.map_to_bev_module(batch_dict) + batch_dict = self.backbone_2d(batch_dict) + batch_dict = self.dense_head(batch_dict) + + batch_dict = self.roi_head.proposal_layer( + batch_dict, nms_config=self.roi_head.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST'] + ) + if self.training: + targets_dict = self.roi_head.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + batch_dict['roi_targets_dict'] = targets_dict + num_rois_per_scene = targets_dict['rois'].shape[1] + if 'roi_valid_num' in batch_dict: + batch_dict['roi_valid_num'] = [num_rois_per_scene for _ in range(batch_dict['batch_size'])] + + batch_dict = self.pfe(batch_dict) + batch_dict = self.point_head(batch_dict) + batch_dict = self.roi_head(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + loss_rpn, tb_dict = self.dense_head.get_loss() + if self.point_head is not None: + loss_point, tb_dict = self.point_head.get_loss(tb_dict) + else: + loss_point = 0 + loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict) + + loss = loss_rpn + loss_point + loss_rcnn + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/second_net.py b/toolbox/openpcdet/pcdet/models/detectors/second_net.py new file mode 100644 index 000000000..c6f2e3679 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/second_net.py @@ -0,0 +1,34 @@ +from .detector3d_template import Detector3DTemplate + + +class SECONDNet(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + + loss_rpn, tb_dict = self.dense_head.get_loss() + tb_dict = { + 'loss_rpn': loss_rpn.item(), + **tb_dict + } + + loss = loss_rpn + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/second_net_iou.py b/toolbox/openpcdet/pcdet/models/detectors/second_net_iou.py new file mode 100644 index 000000000..1fc4e3bd9 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/second_net_iou.py @@ -0,0 +1,177 @@ +import torch +from .detector3d_template import Detector3DTemplate +from ..model_utils.model_nms_utils import class_agnostic_nms +from ...ops.roiaware_pool3d import roiaware_pool3d_utils + + +class SECONDNetIoU(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + batch_dict['dataset_cfg'] = self.dataset.dataset_cfg + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() 
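+            # loss feeds backpropagation; tb_dict carries scalars for TensorBoard
+            # logging and disp_dict extra values for the training progress display.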
+
+            ret_dict = {
+                'loss': loss
+            }
+            return ret_dict, tb_dict, disp_dict
+        else:
+            pred_dicts, recall_dicts = self.post_processing(batch_dict)
+            return pred_dicts, recall_dicts
+
+    def get_training_loss(self):
+        disp_dict = {}
+
+        loss_rpn, tb_dict = self.dense_head.get_loss()
+        loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict)
+
+        loss = loss_rpn + loss_rcnn
+        return loss, tb_dict, disp_dict
+
+    @staticmethod
+    def cal_scores_by_npoints(cls_scores, iou_scores, num_points_in_gt, cls_thresh=10, iou_thresh=100):
+        """
+        Blend classification and IoU scores per box according to how many points
+        fall inside it: sparse boxes rely on the classification score, dense boxes
+        on the IoU score, with linear interpolation in between.
+        Args:
+            cls_scores: (N)
+            iou_scores: (N)
+            num_points_in_gt: (N)
+            cls_thresh: scalar
+            iou_thresh: scalar
+        """
+        assert iou_thresh >= cls_thresh
+        alpha = torch.zeros(cls_scores.shape, dtype=torch.float32).cuda()
+        alpha[num_points_in_gt <= cls_thresh] = 0
+        alpha[num_points_in_gt >= iou_thresh] = 1
+
+        mask = ((num_points_in_gt > cls_thresh) & (num_points_in_gt < iou_thresh))
+        alpha[mask] = (num_points_in_gt[mask] - cls_thresh) / (iou_thresh - cls_thresh)
+
+        scores = (1 - alpha) * cls_scores + alpha * iou_scores
+
+        return scores
+
+    def set_nms_score_by_class(self, iou_preds, cls_preds, label_preds, score_by_class):
+        n_classes = torch.unique(label_preds).shape[0]
+        nms_scores = torch.zeros(iou_preds.shape, dtype=torch.float32).cuda()
+        for i in range(n_classes):
+            mask = label_preds == (i + 1)
+            class_name = self.class_names[i]
+            score_type = score_by_class[class_name]
+            if score_type == 'iou':
+                nms_scores[mask] = iou_preds[mask]
+            elif score_type == 'cls':
+                nms_scores[mask] = cls_preds[mask]
+            else:
+                raise NotImplementedError
+
+        return nms_scores
+
+    def post_processing(self, batch_dict):
+        """
+        Args:
+            batch_dict:
+                batch_size:
+                batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1)
+                batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C)
+                cls_preds_normalized: indicate whether batch_cls_preds is normalized
+                batch_index: optional (N1+N2+...)
+                roi_labels: (B, num_rois) 1 ..
num_classes + Returns: + + """ + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + recall_dict = {} + pred_dicts = [] + for index in range(batch_size): + if batch_dict.get('batch_index', None) is not None: + assert batch_dict['batch_cls_preds'].shape.__len__() == 2 + batch_mask = (batch_dict['batch_index'] == index) + else: + assert batch_dict['batch_cls_preds'].shape.__len__() == 3 + batch_mask = index + + box_preds = batch_dict['batch_box_preds'][batch_mask] + iou_preds = batch_dict['batch_cls_preds'][batch_mask] + cls_preds = batch_dict['roi_scores'][batch_mask] + + src_iou_preds = iou_preds + src_box_preds = box_preds + src_cls_preds = cls_preds + assert iou_preds.shape[1] in [1, self.num_class] + + if not batch_dict['cls_preds_normalized']: + iou_preds = torch.sigmoid(iou_preds) + cls_preds = torch.sigmoid(cls_preds) + + if post_process_cfg.NMS_CONFIG.MULTI_CLASSES_NMS: + raise NotImplementedError + else: + iou_preds, label_preds = torch.max(iou_preds, dim=-1) + label_preds = batch_dict['roi_labels'][index] if batch_dict.get('has_class_labels', False) else label_preds + 1 + + if post_process_cfg.NMS_CONFIG.get('SCORE_BY_CLASS', None) and \ + post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'score_by_class': + nms_scores = self.set_nms_score_by_class( + iou_preds, cls_preds, label_preds, post_process_cfg.NMS_CONFIG.SCORE_BY_CLASS + ) + elif post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) == 'iou' or \ + post_process_cfg.NMS_CONFIG.get('SCORE_TYPE', None) is None: + nms_scores = iou_preds + elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'cls': + nms_scores = cls_preds + elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'weighted_iou_cls': + nms_scores = post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.iou * iou_preds + \ + post_process_cfg.NMS_CONFIG.SCORE_WEIGHTS.cls * cls_preds + elif post_process_cfg.NMS_CONFIG.SCORE_TYPE == 'num_pts_iou_cls': + point_mask = (batch_dict['points'][:, 0] == batch_mask) + batch_points = batch_dict['points'][point_mask][:, 1:4] + + num_pts_in_gt = roiaware_pool3d_utils.points_in_boxes_cpu( + batch_points.cpu(), box_preds[:, 0:7].cpu() + ).sum(dim=1).float().cuda() + + score_thresh_cfg = post_process_cfg.NMS_CONFIG.SCORE_THRESH + nms_scores = self.cal_scores_by_npoints( + cls_preds, iou_preds, num_pts_in_gt, + score_thresh_cfg.cls, score_thresh_cfg.iou + ) + else: + raise NotImplementedError + + selected, selected_scores = class_agnostic_nms( + box_scores=nms_scores, box_preds=box_preds, + nms_config=post_process_cfg.NMS_CONFIG, + score_thresh=post_process_cfg.SCORE_THRESH + ) + + if post_process_cfg.OUTPUT_RAW_SCORE: + raise NotImplementedError + + final_scores = selected_scores + final_labels = label_preds[selected] + final_boxes = box_preds[selected] + + recall_dict = self.generate_recall_record( + box_preds=final_boxes if 'rois' not in batch_dict else src_box_preds, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + record_dict = { + 'pred_boxes': final_boxes, + 'pred_scores': final_scores, + 'pred_labels': final_labels, + 'pred_cls_scores': cls_preds[selected], + 'pred_iou_scores': iou_preds[selected] + } + + pred_dicts.append(record_dict) + + return pred_dicts, recall_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/transfusion.py b/toolbox/openpcdet/pcdet/models/detectors/transfusion.py new file mode 100644 index 000000000..16d81e849 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/transfusion.py @@ -0,0 +1,50 @@ +from 
.detector3d_template import Detector3DTemplate + + +class TransFusion(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss(batch_dict) + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self,batch_dict): + disp_dict = {} + + loss_trans, tb_dict = batch_dict['loss'],batch_dict['tb_dict'] + tb_dict = { + 'loss_trans': loss_trans.item(), + **tb_dict + } + + loss = loss_trans + return loss, tb_dict, disp_dict + + def post_processing(self, batch_dict): + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + final_pred_dict = batch_dict['final_box_dicts'] + recall_dict = {} + for index in range(batch_size): + pred_boxes = final_pred_dict[index]['pred_boxes'] + + recall_dict = self.generate_recall_record( + box_preds=pred_boxes, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + return final_pred_dict, recall_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/voxel_rcnn.py b/toolbox/openpcdet/pcdet/models/detectors/voxel_rcnn.py new file mode 100644 index 000000000..469e868a3 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/voxel_rcnn.py @@ -0,0 +1,37 @@ +from .detector3d_template import Detector3DTemplate + + +class VoxelRCNN(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + return pred_dicts, recall_dicts + + def get_training_loss(self): + disp_dict = {} + loss = 0 + + loss_rpn, tb_dict = self.dense_head.get_loss() + loss_rcnn, tb_dict = self.roi_head.get_loss(tb_dict) + + loss = loss + loss_rpn + loss_rcnn + + if hasattr(self.backbone_3d, 'get_loss'): + loss_backbone3d, tb_dict = self.backbone_3d.get_loss(tb_dict) + loss += loss_backbone3d + + return loss, tb_dict, disp_dict diff --git a/toolbox/openpcdet/pcdet/models/detectors/voxelnext.py b/toolbox/openpcdet/pcdet/models/detectors/voxelnext.py new file mode 100644 index 000000000..f4524223a --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/detectors/voxelnext.py @@ -0,0 +1,44 @@ +from .detector3d_template import Detector3DTemplate + +class VoxelNeXt(Detector3DTemplate): + def __init__(self, model_cfg, num_class, dataset): + super().__init__(model_cfg=model_cfg, num_class=num_class, dataset=dataset) + self.module_list = self.build_networks() + + def forward(self, batch_dict): + + for cur_module in self.module_list: + batch_dict = cur_module(batch_dict) + + if self.training: + loss, tb_dict, disp_dict = self.get_training_loss() + ret_dict = { + 'loss': loss + } + return ret_dict, tb_dict, disp_dict + else: + pred_dicts, recall_dicts = self.post_processing(batch_dict) + 
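+            # post_processing above does not run another NMS pass: the VoxelNeXt
+            # head already wrote per-sample boxes to batch_dict['final_box_dicts'],
+            # so it only gathers recall statistics (see its definition below).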
return pred_dicts, recall_dicts + + def get_training_loss(self): + + disp_dict = {} + loss, tb_dict = self.dense_head.get_loss() + + return loss, tb_dict, disp_dict + + def post_processing(self, batch_dict): + post_process_cfg = self.model_cfg.POST_PROCESSING + batch_size = batch_dict['batch_size'] + final_pred_dict = batch_dict['final_box_dicts'] + recall_dict = {} + for index in range(batch_size): + pred_boxes = final_pred_dict[index]['pred_boxes'] + + recall_dict = self.generate_recall_record( + box_preds=pred_boxes, + recall_dict=recall_dict, batch_index=index, data_dict=batch_dict, + thresh_list=post_process_cfg.RECALL_THRESH_LIST + ) + + return final_pred_dict, recall_dict diff --git a/toolbox/openpcdet/pcdet/models/model_utils/__init__.py b/toolbox/openpcdet/pcdet/models/model_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/models/model_utils/basic_block_2d.py b/toolbox/openpcdet/pcdet/models/model_utils/basic_block_2d.py new file mode 100644 index 000000000..f285eb53d --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/basic_block_2d.py @@ -0,0 +1,34 @@ +import torch.nn as nn + + +class BasicBlock2D(nn.Module): + + def __init__(self, in_channels, out_channels, **kwargs): + """ + Initializes convolutional block + Args: + in_channels: int, Number of input channels + out_channels: int, Number of output channels + **kwargs: Dict, Extra arguments for nn.Conv2d + """ + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.conv = nn.Conv2d(in_channels=in_channels, + out_channels=out_channels, + **kwargs) + self.bn = nn.BatchNorm2d(out_channels) + self.relu = nn.ReLU(inplace=True) + + def forward(self, features): + """ + Applies convolutional block + Args: + features: (B, C_in, H, W), Input features + Returns: + x: (B, C_out, H, W), Output features + """ + x = self.conv(features) + x = self.bn(x) + x = self.relu(x) + return x diff --git a/toolbox/openpcdet/pcdet/models/model_utils/centernet_utils.py b/toolbox/openpcdet/pcdet/models/model_utils/centernet_utils.py new file mode 100644 index 000000000..d24f1caf8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/centernet_utils.py @@ -0,0 +1,385 @@ +# This file is modified from https://github.com/tianweiy/CenterPoint + +import torch +import torch.nn.functional as F +import numpy as np +import numba + + +def gaussian_radius(height, width, min_overlap=0.5): + """ + Args: + height: (N) + width: (N) + min_overlap: + Returns: + """ + a1 = 1 + b1 = (height + width) + c1 = width * height * (1 - min_overlap) / (1 + min_overlap) + sq1 = (b1 ** 2 - 4 * a1 * c1).sqrt() + r1 = (b1 + sq1) / 2 + + a2 = 4 + b2 = 2 * (height + width) + c2 = (1 - min_overlap) * width * height + sq2 = (b2 ** 2 - 4 * a2 * c2).sqrt() + r2 = (b2 + sq2) / 2 + + a3 = 4 * min_overlap + b3 = -2 * min_overlap * (height + width) + c3 = (min_overlap - 1) * width * height + sq3 = (b3 ** 2 - 4 * a3 * c3).sqrt() + r3 = (b3 + sq3) / 2 + ret = torch.min(torch.min(r1, r2), r3) + return ret + + +def gaussian2D(shape, sigma=1): + m, n = [(ss - 1.) / 2. 
for ss in shape] + y, x = np.ogrid[-m:m + 1, -n:n + 1] + + h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) + h[h < np.finfo(h.dtype).eps * h.max()] = 0 + return h + + +def draw_gaussian_to_heatmap(heatmap, center, radius, k=1, valid_mask=None): + diameter = 2 * radius + 1 + gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6) + + x, y = int(center[0]), int(center[1]) + + height, width = heatmap.shape[0:2] + + left, right = min(x, radius), min(width - x, radius + 1) + top, bottom = min(y, radius), min(height - y, radius + 1) + + masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right] + masked_gaussian = torch.from_numpy( + gaussian[radius - top:radius + bottom, radius - left:radius + right] + ).to(heatmap.device).float() + + if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug + if valid_mask is not None: + cur_valid_mask = valid_mask[y - top:y + bottom, x - left:x + right] + masked_gaussian = masked_gaussian * cur_valid_mask.float() + + torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap) + return heatmap + + +def _nms(heat, kernel=3): + pad = (kernel - 1) // 2 + + hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad) + keep = (hmax == heat).float() + return heat * keep + + +def gaussian3D(shape, sigma=1): + m, n = [(ss - 1.) / 2. for ss in shape] + y, x = np.ogrid[-m:m + 1, -n:n + 1] + + h = np.exp(-(x * x + y * y) / (2 * sigma * sigma)) + h[h < np.finfo(h.dtype).eps * h.max()] = 0 + return h + + +def draw_gaussian_to_heatmap_voxels(heatmap, distances, radius, k=1): + diameter = 2 * radius + 1 + sigma = diameter / 6 + masked_gaussian = torch.exp(- distances / (2 * sigma * sigma)) + + torch.max(heatmap, masked_gaussian, out=heatmap) + + return heatmap + + +@numba.jit(nopython=True) +def circle_nms(dets, thresh): + x1 = dets[:, 0] + y1 = dets[:, 1] + scores = dets[:, 2] + order = scores.argsort()[::-1].astype(np.int32) # highest->lowest + ndets = dets.shape[0] + suppressed = np.zeros((ndets), dtype=np.int32) + keep = [] + for _i in range(ndets): + i = order[_i] # start with highest score box + if suppressed[i] == 1: # if any box have enough iou with this, remove it + continue + keep.append(i) + for _j in range(_i + 1, ndets): + j = order[_j] + if suppressed[j] == 1: + continue + # calculate center distance between i and j box + dist = (x1[i] - x1[j]) ** 2 + (y1[i] - y1[j]) ** 2 + + # ovr = inter / areas[j] + if dist <= thresh: + suppressed[j] = 1 + return keep + + +def _circle_nms(boxes, min_radius, post_max_size=83): + """ + NMS according to center distance + """ + keep = np.array(circle_nms(boxes.cpu().numpy(), thresh=min_radius))[:post_max_size] + + keep = torch.from_numpy(keep).long().to(boxes.device) + + return keep + + +def _gather_feat(feat, ind, mask=None): + dim = feat.size(2) + ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) + feat = feat.gather(1, ind) + if mask is not None: + mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + return feat + + +def _transpose_and_gather_feat(feat, ind): + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = _gather_feat(feat, ind) + return feat + + +def _topk(scores, K=40): + batch, num_class, height, width = scores.size() + + topk_scores, topk_inds = torch.topk(scores.flatten(2, 3), K) + + topk_inds = topk_inds % (height * width) + topk_ys = (topk_inds // width).float() + topk_xs = (topk_inds % width).int().float() + + topk_score, topk_ind = 
torch.topk(topk_scores.view(batch, -1), K) + topk_classes = (topk_ind // K).int() + topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K) + topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K) + topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K) + + return topk_score, topk_inds, topk_classes, topk_ys, topk_xs + + +def decode_bbox_from_heatmap(heatmap, rot_cos, rot_sin, center, center_z, dim, + point_cloud_range=None, voxel_size=None, feature_map_stride=None, vel=None, iou=None, K=100, + circle_nms=False, score_thresh=None, post_center_limit_range=None): + batch_size, num_class, _, _ = heatmap.size() + + if circle_nms: + # TODO: not checked yet + assert False, 'not checked yet' + heatmap = _nms(heatmap) + + scores, inds, class_ids, ys, xs = _topk(heatmap, K=K) + center = _transpose_and_gather_feat(center, inds).view(batch_size, K, 2) + rot_sin = _transpose_and_gather_feat(rot_sin, inds).view(batch_size, K, 1) + rot_cos = _transpose_and_gather_feat(rot_cos, inds).view(batch_size, K, 1) + center_z = _transpose_and_gather_feat(center_z, inds).view(batch_size, K, 1) + dim = _transpose_and_gather_feat(dim, inds).view(batch_size, K, 3) + + angle = torch.atan2(rot_sin, rot_cos) + xs = xs.view(batch_size, K, 1) + center[:, :, 0:1] + ys = ys.view(batch_size, K, 1) + center[:, :, 1:2] + + xs = xs * feature_map_stride * voxel_size[0] + point_cloud_range[0] + ys = ys * feature_map_stride * voxel_size[1] + point_cloud_range[1] + + box_part_list = [xs, ys, center_z, dim, angle] + if vel is not None: + vel = _transpose_and_gather_feat(vel, inds).view(batch_size, K, 2) + box_part_list.append(vel) + + if iou is not None: + iou = _transpose_and_gather_feat(iou, inds).view(batch_size, K) + + final_box_preds = torch.cat((box_part_list), dim=-1) + final_scores = scores.view(batch_size, K) + final_class_ids = class_ids.view(batch_size, K) + + assert post_center_limit_range is not None + mask = (final_box_preds[..., :3] >= post_center_limit_range[:3]).all(2) + mask &= (final_box_preds[..., :3] <= post_center_limit_range[3:]).all(2) + + if score_thresh is not None: + mask &= (final_scores > score_thresh) + + ret_pred_dicts = [] + for k in range(batch_size): + cur_mask = mask[k] + cur_boxes = final_box_preds[k, cur_mask] + cur_scores = final_scores[k, cur_mask] + cur_labels = final_class_ids[k, cur_mask] + + if circle_nms: + assert False, 'not checked yet' + centers = cur_boxes[:, [0, 1]] + boxes = torch.cat((centers, scores.view(-1, 1)), dim=1) + keep = _circle_nms(boxes, min_radius=min_radius, post_max_size=nms_post_max_size) + + cur_boxes = cur_boxes[keep] + cur_scores = cur_scores[keep] + cur_labels = cur_labels[keep] + + ret_pred_dicts.append({ + 'pred_boxes': cur_boxes, + 'pred_scores': cur_scores, + 'pred_labels': cur_labels + }) + + if iou is not None: + ret_pred_dicts[-1]['pred_iou'] = iou[k, cur_mask] + return ret_pred_dicts + +def _topk_1d(scores, batch_size, batch_idx, obj, K=40, nuscenes=False): + # scores: (N, num_classes) + topk_score_list = [] + topk_inds_list = [] + topk_classes_list = [] + + for bs_idx in range(batch_size): + batch_inds = batch_idx==bs_idx + if obj.shape[-1] == 1 and not nuscenes: + score = scores[batch_inds].permute(1, 0) + topk_scores, topk_inds = torch.topk(score, K) + topk_score, topk_ind = torch.topk(obj[topk_inds.view(-1)].squeeze(-1), K) #torch.topk(topk_scores.view(-1), K) + else: + score = obj[batch_inds].permute(1, 0) + topk_scores, topk_inds = torch.topk(score, min(K, score.shape[-1])) + 
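+            # Two-stage selection: the per-channel top-K above is flattened and a
+            # second top-K below picks the best K (class, location) pairs overall;
+            # the class id is later recovered as topk_ind // K.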
topk_score, topk_ind = torch.topk(topk_scores.view(-1), min(K, topk_scores.view(-1).shape[-1])) + #topk_score, topk_ind = torch.topk(score.reshape(-1), K) + + topk_classes = (topk_ind // K).int() + topk_inds = topk_inds.view(-1).gather(0, topk_ind) + #print('topk_inds', topk_inds) + + if not obj is None and obj.shape[-1] == 1: + topk_score_list.append(obj[batch_inds][topk_inds]) + else: + topk_score_list.append(topk_score) + topk_inds_list.append(topk_inds) + topk_classes_list.append(topk_classes) + + topk_score = torch.stack(topk_score_list) + topk_inds = torch.stack(topk_inds_list) + topk_classes = torch.stack(topk_classes_list) + + return topk_score, topk_inds, topk_classes + +def gather_feat_idx(feats, inds, batch_size, batch_idx): + feats_list = [] + dim = feats.size(-1) + _inds = inds.unsqueeze(-1).expand(inds.size(0), inds.size(1), dim) + + for bs_idx in range(batch_size): + batch_inds = batch_idx==bs_idx + feat = feats[batch_inds] + feats_list.append(feat.gather(0, _inds[bs_idx])) + feats = torch.stack(feats_list) + return feats + +def decode_bbox_from_voxels_nuscenes(batch_size, indices, obj, rot_cos, rot_sin, + center, center_z, dim, vel=None, iou=None, point_cloud_range=None, voxel_size=None, voxels_3d=None, + feature_map_stride=None, K=100, score_thresh=None, post_center_limit_range=None, add_features=None): + batch_idx = indices[:, 0] + spatial_indices = indices[:, 1:] + scores, inds, class_ids = _topk_1d(None, batch_size, batch_idx, obj, K=K, nuscenes=True) + + center = gather_feat_idx(center, inds, batch_size, batch_idx) + rot_sin = gather_feat_idx(rot_sin, inds, batch_size, batch_idx) + rot_cos = gather_feat_idx(rot_cos, inds, batch_size, batch_idx) + center_z = gather_feat_idx(center_z, inds, batch_size, batch_idx) + dim = gather_feat_idx(dim, inds, batch_size, batch_idx) + spatial_indices = gather_feat_idx(spatial_indices, inds, batch_size, batch_idx) + + if not add_features is None: + add_features = [gather_feat_idx(add_feature, inds, batch_size, batch_idx) for add_feature in add_features] + + if not isinstance(feature_map_stride, int): + feature_map_stride = gather_feat_idx(feature_map_stride.unsqueeze(-1), inds, batch_size, batch_idx) + + angle = torch.atan2(rot_sin, rot_cos) + xs = (spatial_indices[:, :, -1:] + center[:, :, 0:1]) * feature_map_stride * voxel_size[0] + point_cloud_range[0] + ys = (spatial_indices[:, :, -2:-1] + center[:, :, 1:2]) * feature_map_stride * voxel_size[1] + point_cloud_range[1] + #zs = (spatial_indices[:, :, 0:1]) * feature_map_stride * voxel_size[2] + point_cloud_range[2] + center_z + + box_part_list = [xs, ys, center_z, dim, angle] + + if not vel is None: + vel = gather_feat_idx(vel, inds, batch_size, batch_idx) + box_part_list.append(vel) + + if not iou is None: + iou = gather_feat_idx(iou, inds, batch_size, batch_idx) + iou = torch.clamp(iou, min=0, max=1.) 
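+    # Boxes are assembled as (x, y, z, dx, dy, dz, heading[, vx, vy]): x/y come
+    # from the sparse voxel indices plus predicted sub-voxel offsets, scaled by
+    # feature_map_stride and voxel_size into point-cloud-range coordinates.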
+ + final_box_preds = torch.cat((box_part_list), dim=-1) + final_scores = scores.view(batch_size, K) + final_class_ids = class_ids.view(batch_size, K) + if not add_features is None: + add_features = [add_feature.view(batch_size, K, add_feature.shape[-1]) for add_feature in add_features] + + assert post_center_limit_range is not None + mask = (final_box_preds[..., :3] >= post_center_limit_range[:3]).all(2) + mask &= (final_box_preds[..., :3] <= post_center_limit_range[3:]).all(2) + + if score_thresh is not None: + mask &= (final_scores > score_thresh) + + ret_pred_dicts = [] + for k in range(batch_size): + cur_mask = mask[k] + cur_boxes = final_box_preds[k, cur_mask] + cur_scores = final_scores[k, cur_mask] + cur_labels = final_class_ids[k, cur_mask] + cur_add_features = [add_feature[k, cur_mask] for add_feature in add_features] if not add_features is None else None + cur_iou = iou[k, cur_mask] if not iou is None else None + + ret_pred_dicts.append({ + 'pred_boxes': cur_boxes, + 'pred_scores': cur_scores, + 'pred_labels': cur_labels, + 'pred_ious': cur_iou, + 'add_features': cur_add_features, + }) + return ret_pred_dicts + + +def decode_bbox_from_pred_dicts(pred_dict, point_cloud_range=None, voxel_size=None, feature_map_stride=None): + batch_size, _, H, W = pred_dict['center'].shape + + batch_center = pred_dict['center'].permute(0, 2, 3, 1).contiguous().view(batch_size, H*W, 2) # (B, H, W, 2) + batch_center_z = pred_dict['center_z'].permute(0, 2, 3, 1).contiguous().view(batch_size, H*W, 1) # (B, H, W, 1) + batch_dim = pred_dict['dim'].exp().permute(0, 2, 3, 1).contiguous().view(batch_size, H*W, 3) # (B, H, W, 3) + batch_rot_cos = pred_dict['rot'][:, 0].unsqueeze(dim=1).permute(0, 2, 3, 1).contiguous().view(batch_size, H*W, 1) # (B, H, W, 1) + batch_rot_sin = pred_dict['rot'][:, 1].unsqueeze(dim=1).permute(0, 2, 3, 1).contiguous().view(batch_size, H*W, 1) # (B, H, W, 1) + batch_vel = pred_dict['vel'].permute(0, 2, 3, 1).contiguous().view(batch_size, H*W, 2) if 'vel' in pred_dict.keys() else None + + angle = torch.atan2(batch_rot_sin, batch_rot_cos) # (B, H*W, 1) + + ys, xs = torch.meshgrid([torch.arange(0, H, device=batch_center.device, dtype=batch_center.dtype), + torch.arange(0, W, device=batch_center.device, dtype=batch_center.dtype)]) + ys = ys.view(1, H, W).repeat(batch_size, 1, 1) + xs = xs.view(1, H, W).repeat(batch_size, 1, 1) + xs = xs.view(batch_size, -1, 1) + batch_center[:, :, 0:1] + ys = ys.view(batch_size, -1, 1) + batch_center[:, :, 1:2] + + xs = xs * feature_map_stride * voxel_size[0] + point_cloud_range[0] + ys = ys * feature_map_stride * voxel_size[1] + point_cloud_range[1] + + box_part_list = [xs, ys, batch_center_z, batch_dim, angle] + if batch_vel is not None: + box_part_list.append(batch_vel) + + box_preds = torch.cat((box_part_list), dim=-1).view(batch_size, H, W, -1) + + return box_preds diff --git a/toolbox/openpcdet/pcdet/models/model_utils/dsvt_utils.py b/toolbox/openpcdet/pcdet/models/model_utils/dsvt_utils.py new file mode 100644 index 000000000..a3640528c --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/dsvt_utils.py @@ -0,0 +1,150 @@ +import torch +import torch.nn as nn +import numpy as np +from pcdet.ops.ingroup_inds.ingroup_inds_op import ingroup_inds + + +get_inner_win_inds_cuda = ingroup_inds + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. 
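+    Maps coordinates of shape (N, input_channel) to embeddings of shape
+    (N, num_pos_feats) via a two-layer MLP with BatchNorm1d and ReLU.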
+ """ + def __init__(self, input_channel, num_pos_feats): + super().__init__() + self.position_embedding_head = nn.Sequential( + nn.Linear(input_channel, num_pos_feats), + nn.BatchNorm1d(num_pos_feats), + nn.ReLU(inplace=True), + nn.Linear(num_pos_feats, num_pos_feats)) + + def forward(self, xyz): + position_embedding = self.position_embedding_head(xyz) + return position_embedding + + +@torch.no_grad() +def get_window_coors(coors, sparse_shape, window_shape, do_shift, shift_list=None, return_win_coors=False): + + if len(window_shape) == 2: + win_shape_x, win_shape_y = window_shape + win_shape_z = sparse_shape[-1] + else: + win_shape_x, win_shape_y, win_shape_z = window_shape + + sparse_shape_x, sparse_shape_y, sparse_shape_z = sparse_shape + assert sparse_shape_z < sparse_shape_x, 'Usually holds... in case of wrong order' + + max_num_win_x = int(np.ceil((sparse_shape_x / win_shape_x)) + 1) # plus one here to meet the needs of shift. + max_num_win_y = int(np.ceil((sparse_shape_y / win_shape_y)) + 1) # plus one here to meet the needs of shift. + max_num_win_z = int(np.ceil((sparse_shape_z / win_shape_z)) + 1) # plus one here to meet the needs of shift. + max_num_win_per_sample = max_num_win_x * max_num_win_y * max_num_win_z + + if do_shift: + if shift_list is not None: + shift_x, shift_y, shift_z = shift_list[0], shift_list[1], shift_list[2] + else: + shift_x, shift_y, shift_z = win_shape_x // 2, win_shape_y // 2, win_shape_z // 2 + else: + if shift_list is not None: + shift_x, shift_y, shift_z = shift_list[0], shift_list[1], shift_list[2] + else: + shift_x, shift_y, shift_z = win_shape_x, win_shape_y, win_shape_z + + # compatibility between 2D window and 3D window + if sparse_shape_z == win_shape_z: + shift_z = 0 + + shifted_coors_x = coors[:, 3] + shift_x + shifted_coors_y = coors[:, 2] + shift_y + shifted_coors_z = coors[:, 1] + shift_z + + win_coors_x = shifted_coors_x // win_shape_x + win_coors_y = shifted_coors_y // win_shape_y + win_coors_z = shifted_coors_z // win_shape_z + + if len(window_shape) == 2: + assert (win_coors_z == 0).all() + + batch_win_inds = coors[:, 0] * max_num_win_per_sample + \ + win_coors_x * max_num_win_y * max_num_win_z + \ + win_coors_y * max_num_win_z + \ + win_coors_z + + coors_in_win_x = shifted_coors_x % win_shape_x + coors_in_win_y = shifted_coors_y % win_shape_y + coors_in_win_z = shifted_coors_z % win_shape_z + coors_in_win = torch.stack([coors_in_win_z, coors_in_win_y, coors_in_win_x], dim=-1) + # coors_in_win = torch.stack([coors_in_win_x, coors_in_win_y], dim=-1) + if return_win_coors: + batch_win_coords = torch.stack([win_coors_z, win_coors_y, win_coors_x], dim=-1) + return batch_win_inds, coors_in_win, batch_win_coords + + return batch_win_inds, coors_in_win + + +def get_pooling_index(coors, sparse_shape, window_shape): + win_shape_x, win_shape_y, win_shape_z = window_shape + sparse_shape_x, sparse_shape_y, sparse_shape_z = sparse_shape + + max_num_win_x = int(np.ceil((sparse_shape_x / win_shape_x))) + max_num_win_y = int(np.ceil((sparse_shape_y / win_shape_y))) + max_num_win_z = int(np.ceil((sparse_shape_z / win_shape_z))) + max_num_win_per_sample = max_num_win_x * max_num_win_y * max_num_win_z + + coors_x = coors[:, 3] + coors_y = coors[:, 2] + coors_z = coors[:, 1] + + win_coors_x = coors_x // win_shape_x + win_coors_y = coors_y // win_shape_y + win_coors_z = coors_z // win_shape_z + + batch_win_inds = coors[:, 0] * max_num_win_per_sample + \ + win_coors_x * max_num_win_y * max_num_win_z + \ + win_coors_y * max_num_win_z + \ + win_coors_z + + 
coors_in_win_x = coors_x % win_shape_x + coors_in_win_y = coors_y % win_shape_y + coors_in_win_z = coors_z % win_shape_z + coors_in_win = torch.stack([coors_in_win_z, coors_in_win_y, coors_in_win_x], dim=-1) + + index_in_win = coors_in_win_x * win_shape_y * win_shape_z + \ + coors_in_win_y * win_shape_z + \ + coors_in_win_z + + batch_win_coords = torch.stack([coors[:, 0], win_coors_z, win_coors_y, win_coors_x], dim=-1) + return batch_win_inds, coors_in_win, index_in_win, batch_win_coords + + +def get_continous_inds(setnum_per_win): + ''' + Args: + setnum_per_win (Tensor[int]): Number of sets assigned to each window with shape (win_num). + Returns: + set_win_inds (Tensor[int]): Window indexs of each set with shape (set_num). + set_inds_in_win (Tensor[int]): Set indexs inner window with shape (set_num). + + Examples: + setnum_per_win = torch.tensor([1, 2, 1, 3]) + set_inds_in_win = get_continous_inds(setnum_per_win) + # we can get: set_inds_in_win = tensor([0, 0, 1, 0, 0, 1, 2]) + ''' + set_num = setnum_per_win.sum().item() # set_num = 7 + setnum_per_win_cumsum = torch.cumsum(setnum_per_win, dim=0)[:-1] # [1, 3, 4] + set_win_inds = torch.full((set_num,), 0, device=setnum_per_win.device) + set_win_inds[setnum_per_win_cumsum] = 1 # [0, 1, 0, 1, 1, 0, 0] + set_win_inds = torch.cumsum(set_win_inds, dim=0) # [0, 1, 1, 2, 3, 3, 3] + + roll_set_win_inds_left = torch.roll(set_win_inds, -1) # [1, 1, 2, 3, 3, 3, 0] + diff = set_win_inds - roll_set_win_inds_left # [-1, 0, -1, -1, 0, 0, 3] + end_pos_mask = diff != 0 + template = torch.ones_like(set_win_inds) + template[end_pos_mask] = (setnum_per_win - 1) * -1 # [ 0, 1, -1, 0, 1, 1, -2] + set_inds_in_win = torch.cumsum(template,dim=0) # [0, 1, 0, 0, 1, 2, 0] + set_inds_in_win[end_pos_mask] = setnum_per_win # [1, 1, 2, 1, 1, 2, 3] + set_inds_in_win = set_inds_in_win - 1 # [0, 0, 1, 0, 0, 1, 2] + + return set_win_inds, set_inds_in_win \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/model_utils/model_nms_utils.py b/toolbox/openpcdet/pcdet/models/model_utils/model_nms_utils.py new file mode 100644 index 000000000..8be1097e9 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/model_nms_utils.py @@ -0,0 +1,107 @@ +import torch + +from ...ops.iou3d_nms import iou3d_nms_utils + + +def class_agnostic_nms(box_scores, box_preds, nms_config, score_thresh=None): + src_box_scores = box_scores + if score_thresh is not None: + scores_mask = (box_scores >= score_thresh) + box_scores = box_scores[scores_mask] + box_preds = box_preds[scores_mask] + + selected = [] + if box_scores.shape[0] > 0: + box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0])) + boxes_for_nms = box_preds[indices] + keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)( + boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config + ) + selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]] + + if score_thresh is not None: + original_idxs = scores_mask.nonzero().view(-1) + selected = original_idxs[selected] + return selected, src_box_scores[selected] + + +def multi_classes_nms(cls_scores, box_preds, nms_config, score_thresh=None): + """ + Args: + cls_scores: (N, num_class) + box_preds: (N, 7 + C) + nms_config: + score_thresh: + + Returns: + + """ + pred_scores, pred_labels, pred_boxes = [], [], [] + for k in range(cls_scores.shape[1]): + if score_thresh is not None: + scores_mask = (cls_scores[:, k] >= score_thresh) + box_scores = cls_scores[scores_mask, k] + cur_box_preds = 
box_preds[scores_mask] + else: + box_scores = cls_scores[:, k] + cur_box_preds = box_preds + + selected = [] + if box_scores.shape[0] > 0: + box_scores_nms, indices = torch.topk(box_scores, k=min(nms_config.NMS_PRE_MAXSIZE, box_scores.shape[0])) + boxes_for_nms = cur_box_preds[indices] + keep_idx, selected_scores = getattr(iou3d_nms_utils, nms_config.NMS_TYPE)( + boxes_for_nms[:, 0:7], box_scores_nms, nms_config.NMS_THRESH, **nms_config + ) + selected = indices[keep_idx[:nms_config.NMS_POST_MAXSIZE]] + + pred_scores.append(box_scores[selected]) + pred_labels.append(box_scores.new_ones(len(selected)).long() * k) + pred_boxes.append(cur_box_preds[selected]) + + pred_scores = torch.cat(pred_scores, dim=0) + pred_labels = torch.cat(pred_labels, dim=0) + pred_boxes = torch.cat(pred_boxes, dim=0) + + return pred_scores, pred_labels, pred_boxes + + +def class_specific_nms(box_scores, box_preds, box_labels, nms_config, score_thresh=None): + """ + Args: + cls_scores: (N,) + box_preds: (N, 7 + C) + box_labels: (N,) + nms_config: + + Returns: + + """ + selected = [] + for k in range(len(nms_config.NMS_THRESH)): + curr_mask = box_labels == k + if score_thresh is not None and isinstance(score_thresh, float): + curr_mask *= (box_scores > score_thresh) + elif score_thresh is not None and isinstance(score_thresh, list): + curr_mask *= (box_scores > score_thresh[k]) + curr_idx = torch.nonzero(curr_mask)[:, 0] + curr_box_scores = box_scores[curr_mask] + cur_box_preds = box_preds[curr_mask] + + if curr_box_scores.shape[0] > 0: + curr_box_scores_nms = curr_box_scores + curr_boxes_for_nms = cur_box_preds + + keep_idx, _ = getattr(iou3d_nms_utils, 'nms_gpu')( + curr_boxes_for_nms, curr_box_scores_nms, + thresh=nms_config.NMS_THRESH[k], + pre_maxsize=nms_config.NMS_PRE_MAXSIZE[k], + post_max_size=nms_config.NMS_POST_MAXSIZE[k] + ) + curr_selected = curr_idx[keep_idx] + selected.append(curr_selected) + if len(selected) != 0: + selected = torch.cat(selected) + + + return selected, box_scores[selected] diff --git a/toolbox/openpcdet/pcdet/models/model_utils/mppnet_utils.py b/toolbox/openpcdet/pcdet/models/model_utils/mppnet_utils.py new file mode 100644 index 000000000..10641ad3a --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/mppnet_utils.py @@ -0,0 +1,420 @@ +from os import getgrouplist +import torch.nn as nn +import torch +import numpy as np +import torch.nn.functional as F +from typing import Optional, List +from torch import Tensor +from torch.nn.init import xavier_uniform_, zeros_, kaiming_normal_ + + +class PointNetfeat(nn.Module): + def __init__(self, input_dim, x=1,outchannel=512): + super(PointNetfeat, self).__init__() + if outchannel==256: + self.output_channel = 256 + else: + self.output_channel = 512 * x + self.conv1 = torch.nn.Conv1d(input_dim, 64 * x, 1) + self.conv2 = torch.nn.Conv1d(64 * x, 128 * x, 1) + self.conv3 = torch.nn.Conv1d(128 * x, 256 * x, 1) + self.conv4 = torch.nn.Conv1d(256 * x, self.output_channel, 1) + self.bn1 = nn.BatchNorm1d(64 * x) + self.bn2 = nn.BatchNorm1d(128 * x) + self.bn3 = nn.BatchNorm1d(256 * x) + self.bn4 = nn.BatchNorm1d(self.output_channel) + + def forward(self, x): + x = F.relu(self.bn1(self.conv1(x))) + x = F.relu(self.bn2(self.conv2(x))) + x = F.relu(self.bn3(self.conv3(x))) + x_ori = self.bn4(self.conv4(x)) + + x = torch.max(x_ori, 2, keepdim=True)[0] + + x = x.view(-1, self.output_channel) + return x, x_ori + +class PointNet(nn.Module): + def __init__(self, input_dim, joint_feat=False,model_cfg=None): + super(PointNet, self).__init__() + 
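+        # Assumed fix: the joint_feat branch of forward() reads self.output_channel,
+        # which is otherwise only defined on PointNetfeat; mirror the value implied
+        # by self.fc1's input size (512, PointNetfeat's default outchannel) so that
+        # branch can run.
+        self.output_channel = 512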
self.joint_feat = joint_feat + channels = model_cfg.TRANS_INPUT + + times=1 + self.feat = PointNetfeat(input_dim, 1) + + self.fc1 = nn.Linear(512, 256 ) + self.fc2 = nn.Linear(256, channels) + + self.pre_bn = nn.BatchNorm1d(input_dim) + self.bn1 = nn.BatchNorm1d(256) + self.bn2 = nn.BatchNorm1d(channels) + self.relu = nn.ReLU() + + self.fc_s1 = nn.Linear(channels*times, 256) + self.fc_s2 = nn.Linear(256, 3, bias=False) + self.fc_ce1 = nn.Linear(channels*times, 256) + self.fc_ce2 = nn.Linear(256, 3, bias=False) + self.fc_hr1 = nn.Linear(channels*times, 256) + self.fc_hr2 = nn.Linear(256, 1, bias=False) + + def forward(self, x, feat=None): + + if self.joint_feat: + if len(feat.shape) > 2: + feat = torch.max(feat, 2, keepdim=True)[0] + x = feat.view(-1, self.output_channel) + x = F.relu(self.bn1(self.fc1(x))) + feat = F.relu(self.bn2(self.fc2(x))) + else: + feat = feat + feat_traj = None + else: + x, feat_traj = self.feat(self.pre_bn(x)) + x = F.relu(self.bn1(self.fc1(x))) + feat = F.relu(self.bn2(self.fc2(x))) + + x = F.relu(self.fc_ce1(feat)) + centers = self.fc_ce2(x) + + x = F.relu(self.fc_s1(feat)) + sizes = self.fc_s2(x) + + x = F.relu(self.fc_hr1(feat)) + headings = self.fc_hr2(x) + + return torch.cat([centers, sizes, headings],-1),feat,feat_traj + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear): + kaiming_normal_(m.weight.data) + if m.bias is not None: + zeros_(m.bias) + +class MLP(nn.Module): + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + +class SpatialMixerBlock(nn.Module): + + def __init__(self,hidden_dim,grid_size,channels,config=None,dropout=0.0): + super().__init__() + + + self.mixer_x = MLP(input_dim = grid_size, hidden_dim = hidden_dim, output_dim = grid_size, num_layers = 3) + self.mixer_y = MLP(input_dim = grid_size, hidden_dim = hidden_dim, output_dim = grid_size, num_layers = 3) + self.mixer_z = MLP(input_dim = grid_size, hidden_dim = hidden_dim, output_dim = grid_size, num_layers = 3) + self.norm_x = nn.LayerNorm(channels) + self.norm_y = nn.LayerNorm(channels) + self.norm_z = nn.LayerNorm(channels) + self.norm_channel = nn.LayerNorm(channels) + self.ffn = nn.Sequential( + nn.Linear(channels, 2*channels), + nn.ReLU(), + nn.Dropout(dropout), + nn.Linear(2*channels, channels), + ) + self.config = config + self.grid_size = grid_size + + def forward(self, src): + + src_3d = src.permute(1,2,0).contiguous().view(src.shape[1],src.shape[2], + self.grid_size,self.grid_size,self.grid_size) + src_3d = src_3d.permute(0,1,4,3,2).contiguous() + mixed_x = self.mixer_x(src_3d) + mixed_x = src_3d + mixed_x + mixed_x = self.norm_x(mixed_x.permute(0,2,3,4,1)).permute(0,4,1,2,3).contiguous() + + mixed_y = self.mixer_y(mixed_x.permute(0,1,2,4,3)).permute(0,1,2,4,3).contiguous() + mixed_y = mixed_x + mixed_y + mixed_y = self.norm_y(mixed_y.permute(0,2,3,4,1)).permute(0,4,1,2,3).contiguous() + + mixed_z = self.mixer_z(mixed_y.permute(0,1,4,3,2)).permute(0,1,4,3,2).contiguous() + + mixed_z = mixed_y + mixed_z + mixed_z = self.norm_z(mixed_z.permute(0,2,3,4,1)).permute(0,4,1,2,3).contiguous() + + src_mixer = mixed_z.view(src.shape[1],src.shape[2],-1).permute(2,0,1) + src_mixer = 
src_mixer + self.ffn(src_mixer) + src_mixer = self.norm_channel(src_mixer) + + return src_mixer + +class Transformer(nn.Module): + + def __init__(self, config, d_model=512, nhead=8, num_encoder_layers=6, + dim_feedforward=2048, dropout=0.1,activation="relu", normalize_before=False, + num_lidar_points=None,num_proxy_points=None, share_head=True,num_groups=None, + sequence_stride=None,num_frames=None): + super().__init__() + + self.config = config + self.share_head = share_head + self.num_frames = num_frames + self.nhead = nhead + self.sequence_stride = sequence_stride + self.num_groups = num_groups + self.num_proxy_points = num_proxy_points + self.num_lidar_points = num_lidar_points + self.d_model = d_model + self.nhead = nhead + encoder_layer = [TransformerEncoderLayer(self.config, d_model, nhead, dim_feedforward,dropout, activation, + normalize_before, num_lidar_points,num_groups=num_groups) for i in range(num_encoder_layers)] + + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm,self.config) + + self.token = nn.Parameter(torch.zeros(self.num_groups, 1, d_model)) + + + if self.num_frames >4: + + self.group_length = self.num_frames // self.num_groups + self.fusion_all_group = MLP(input_dim = self.config.hidden_dim*self.group_length, + hidden_dim = self.config.hidden_dim, output_dim = self.config.hidden_dim, num_layers = 4) + + self.fusion_norm = FFN(d_model, dim_feedforward) + + self._reset_parameters() + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, pos=None): + + BS, N, C = src.shape + if not pos is None: + pos = pos.permute(1, 0, 2) + + if self.num_frames == 16: + token_list = [self.token[i:(i+1)].repeat(BS,1,1) for i in range(self.num_groups)] + if self.sequence_stride ==1: + src_groups = src.view(src.shape[0],src.shape[1]//self.num_groups ,-1).chunk(4,dim=1) + + elif self.sequence_stride ==4: + src_groups = [] + + for i in range(self.num_groups): + groups = [] + for j in range(self.group_length): + points_index_start = (i+j*self.sequence_stride)*self.num_proxy_points + points_index_end = points_index_start + self.num_proxy_points + groups.append(src[:,points_index_start:points_index_end]) + + groups = torch.cat(groups,-1) + src_groups.append(groups) + + else: + raise NotImplementedError + + src_merge = torch.cat(src_groups,1) + src = self.fusion_norm(src[:,:self.num_groups*self.num_proxy_points],self.fusion_all_group(src_merge)) + src = [torch.cat([token_list[i],src[:,i*self.num_proxy_points:(i+1)*self.num_proxy_points]],dim=1) for i in range(self.num_groups)] + src = torch.cat(src,dim=0) + + else: + token_list = [self.token[i:(i+1)].repeat(BS,1,1) for i in range(self.num_groups)] + src = [torch.cat([token_list[i],src[:,i*self.num_proxy_points:(i+1)*self.num_proxy_points]],dim=1) for i in range(self.num_groups)] + src = torch.cat(src,dim=0) + + src = src.permute(1, 0, 2) + memory,tokens = self.encoder(src,pos=pos) + + memory = torch.cat(memory[0:1].chunk(4,dim=1),0) + return memory, tokens + + +class TransformerEncoder(nn.Module): + + def __init__(self, encoder_layer, num_layers, norm=None,config=None): + super().__init__() + self.layers = nn.ModuleList(encoder_layer) + self.num_layers = num_layers + self.norm = norm + self.config = config + + def forward(self, src, + pos: Optional[Tensor] = None): + + token_list = [] + output = src + for layer in self.layers: + output,tokens = 
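For the 16-frame branch above, the stride-4 grouping is easier to see with the indices written out. A small sketch of the arithmetic, using MPPNet-style placeholder values (16 frames, 4 groups, stride 4, 64 proxy points per frame):

```python
# Frame-to-group assignment in Transformer.forward for the 16-frame setting.
num_frames, num_groups, sequence_stride, num_proxy_points = 16, 4, 4, 64
group_length = num_frames // num_groups          # 4 frames fused per group

for i in range(num_groups):
    frames = [i + j * sequence_stride for j in range(group_length)]
    # each frame f occupies tokens [f*num_proxy_points, (f+1)*num_proxy_points)
    print(f"group {i} fuses frames {frames}")

# group 0 fuses frames [0, 4, 8, 12]
# group 1 fuses frames [1, 5, 9, 13] ... every group spans the whole sequence
```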
layer(output,pos=pos) + token_list.append(tokens) + if self.norm is not None: + output = self.norm(output) + + return output,token_list + + +class TransformerEncoderLayer(nn.Module): + count = 0 + def __init__(self, config, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False,num_points=None,num_groups=None): + super().__init__() + TransformerEncoderLayer.count += 1 + self.layer_count = TransformerEncoderLayer.count + self.config = config + self.num_point = num_points + self.num_groups= num_groups + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + if self.layer_count <= self.config.enc_layers-1: + self.cross_attn_layers = nn.ModuleList() + for _ in range(self.num_groups): + self.cross_attn_layers.append(nn.MultiheadAttention(d_model, nhead, dropout=dropout)) + + self.ffn = FFN(d_model, dim_feedforward) + self.fusion_all_groups = MLP(input_dim = d_model*4, hidden_dim = d_model, output_dim = d_model, num_layers = 4) + + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + self.mlp_mixer_3d = SpatialMixerBlock(self.config.use_mlp_mixer.hidden_dim,self.config.use_mlp_mixer.get('grid_size', 4),self.config.hidden_dim, self.config.use_mlp_mixer) + + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + src, + pos: Optional[Tensor] = None): + + src_intra_group_fusion = self.mlp_mixer_3d(src[1:]) + src = torch.cat([src[:1],src_intra_group_fusion],0) + + token = src[:1] + + if not pos is None: + key = self.with_pos_embed(src_intra_group_fusion, pos[1:]) + else: + key = src_intra_group_fusion + + src_summary = self.self_attn(token, key, value=src_intra_group_fusion)[0] + token = token + self.dropout1(src_summary) + token = self.norm1(token) + src_summary = self.linear2(self.dropout(self.activation(self.linear1(token)))) + token = token + self.dropout2(src_summary) + token = self.norm2(token) + src = torch.cat([token,src[1:]],0) + + if self.layer_count <= self.config.enc_layers-1: + + src_all_groups = src[1:].view((src.shape[0]-1)*4,-1,src.shape[-1]) + src_groups_list = src_all_groups.chunk(self.num_groups,0) + + src_all_groups = torch.cat(src_groups_list,-1) + src_all_groups_fusion = self.fusion_all_groups(src_all_groups) + + key = self.with_pos_embed(src_all_groups_fusion, pos[1:]) + query_list = [self.with_pos_embed(query, pos[1:]) for query in src_groups_list] + + inter_group_fusion_list = [] + for i in range(self.num_groups): + inter_group_fusion = self.cross_attn_layers[i](query_list[i], key, value=src_all_groups_fusion)[0] + inter_group_fusion = self.ffn(src_groups_list[i],inter_group_fusion) + inter_group_fusion_list.append(inter_group_fusion) + + src_inter_group_fusion = torch.cat(inter_group_fusion_list,1) + + src = torch.cat([src[:1],src_inter_group_fusion],0) + + return src, torch.cat(src[:1].chunk(4,1),0) + + def forward_pre(self, src, + pos: Optional[Tensor] = None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn(q, k, value=src2)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) 
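The per-group token readout in `forward_post` is a one-query attention: the learnable token attends over the group's proxy-point features and is then re-inserted at position 0. A minimal stand-alone illustration of that pattern (sequence-first layout, as used above):

```python
import torch
import torch.nn as nn

d_model, nhead, num_points, bs = 256, 4, 64, 2
attn = nn.MultiheadAttention(d_model, nhead)

token = torch.randn(1, bs, d_model)             # single-element query
points = torch.randn(num_points, bs, d_model)   # proxy features: keys/values

summary, _ = attn(token, points, value=points)
print(summary.shape)   # torch.Size([1, 2, 256]): one summary vector per sample
```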
+ src = src + self.dropout2(src2) + return src + + def forward(self, src, + pos: Optional[Tensor] = None): + + if self.normalize_before: + return self.forward_pre(src, pos) + return self.forward_post(src, pos) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + + +class FFN(nn.Module): + def __init__(self, d_model, dim_feedforward=2048, dropout=0.1,dout=None, + activation="relu", normalize_before=False): + super().__init__() + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def forward(self, tgt,tgt_input): + tgt = tgt + self.dropout2(tgt_input) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + + return tgt + +def build_transformer(args): + return Transformer( + config = args, + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + normalize_before=args.pre_norm, + num_lidar_points = args.num_lidar_points, + num_proxy_points = args.num_proxy_points, + num_frames = args.num_frames, + sequence_stride = args.get('sequence_stride',1), + num_groups=args.num_groups, + ) + diff --git a/toolbox/openpcdet/pcdet/models/model_utils/swin_utils.py b/toolbox/openpcdet/pcdet/models/model_utils/swin_utils.py new file mode 100644 index 000000000..3abd51fd1 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/swin_utils.py @@ -0,0 +1,659 @@ +""" +Mostly copy-paste from + https://github.com/open-mmlab/mmdetection/blob/ecac3a77becc63f23d9f6980b2a36f86acd00a8a/mmdet/models/layers/transformer/utils.py + +""" + +import copy +import math +import warnings +import collections.abc +from collections import OrderedDict +from itertools import repeat +from typing import Sequence + +import torch +from torch import Tensor +import torch.nn as nn +import torch.nn.functional as F + +# From PyTorch internals +def _ntuple(n): + + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + +to_2tuple = _ntuple(2) + +def constant_init(module: nn.Module, val: float, bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + nn.init.constant_(module.weight, val) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +def trunc_normal_init(module: nn.Module, + mean: float = 0, + std: float = 1, + a: float = -2, + b: float = 2, + bias: float = 0) -> None: + if hasattr(module, 'weight') and module.weight is not None: + trunc_normal_(module.weight, mean, std, a, b) # type: ignore + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) # type: ignore + + +def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, + b: float) -> Tensor: + # Method based on + # 
https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + # Modified from + # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn( + 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' + 'The distribution of values may be incorrect.', + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + lower = norm_cdf((a - mean) / std) + upper = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [lower, upper], then translate + # to [2lower-1, 2upper-1]. + tensor.uniform_(2 * lower - 1, 2 * upper - 1) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def trunc_normal_(tensor: Tensor, + mean: float = 0., + std: float = 1., + a: float = -2., + b: float = 2.) -> Tensor: + r"""Fills the input Tensor with values drawn from a truncated + normal distribution. The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + Modified from + https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py + + Args: + tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`. + mean (float): the mean of the normal distribution. + std (float): the standard deviation of the normal distribution. + a (float): the minimum cutoff value. + b (float): the maximum cutoff value. + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b) + + +def drop_path(x: torch.Tensor, + drop_prob: float = 0., + training: bool = False) -> torch.Tensor: + """Drop paths (Stochastic Depth) per sample (when applied in main path of + residual blocks). + + We follow the implementation + https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501 + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + # handle tensors with different dimensions, not just 4D tensors. + shape = (x.shape[0], ) + (1, ) * (x.ndim - 1) + random_tensor = keep_prob + torch.rand( + shape, dtype=x.dtype, device=x.device) + output = x.div(keep_prob) * random_tensor.floor() + return output + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of + residual blocks). + + We follow the implementation + https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501 + + Args: + drop_prob (float): Probability of the path to be zeroed. Default: 0.1 + """ + + def __init__(self, drop_prob: float = 0.1): + super().__init__() + self.drop_prob = drop_prob + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return drop_path(x, self.drop_prob, self.training) + + +class FFN(nn.Module): + """Implements feed-forward networks (FFNs) with identity connection. 
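Note that `drop_path` zeroes whole samples but divides the survivors by `keep_prob`, so the expected activation is unchanged. A quick numerical check of that identity, using the same masking recipe as the function above:

```python
import torch

x = torch.ones(10000, 8)
keep_prob = 0.8

shape = (x.shape[0],) + (1,) * (x.ndim - 1)       # one mask entry per sample
mask = (keep_prob + torch.rand(shape)).floor()    # 1 with prob keep_prob, else 0
out = x.div(keep_prob) * mask

print(out.mean().item())   # ~1.0 up to sampling noise: expectation preserved
```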
+ + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Default: 2. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + add_identity (bool, optional): Whether to add the + identity connection. Default: `True`. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0., + dropout_layer=None, + add_identity=True, + init_cfg=None, + **kwargs): + super().__init__() + self._is_init = False + self.init_cfg = copy.deepcopy(init_cfg) + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + # ignore act_cfg, default GELU + self.activate = nn.GELU() + + layers = [] + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + nn.Sequential( + nn.Linear(in_channels, feedforward_channels), self.activate, + nn.Dropout(ffn_drop))) + in_channels = feedforward_channels + layers.append(nn.Linear(feedforward_channels, embed_dims)) + layers.append(nn.Dropout(ffn_drop)) + self.layers = nn.Sequential(*layers) + self.dropout_layer = DropPath(dropout_layer['drop_prob']) + self.add_identity = add_identity + + def forward(self, x, identity=None): + """Forward function for `FFN`. + + The function would add x to the output tensor if residue is None. + """ + out = self.layers(x) + if not self.add_identity: + return self.dropout_layer(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + + +def nlc_to_nchw(x, hw_shape): + """Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, L, C] before conversion. + hw_shape (Sequence[int]): The height and width of output feature map. + + Returns: + Tensor: The output tensor of shape [N, C, H, W] after conversion. + """ + H, W = hw_shape + assert len(x.shape) == 3 + B, L, C = x.shape + assert L == H * W, 'The seq_len does not match H, W' + return x.transpose(1, 2).reshape(B, C, H, W).contiguous() + + +def nchw_to_nlc(x): + """Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor. + + Args: + x (Tensor): The input tensor of shape [N, C, H, W] before conversion. + + Returns: + Tensor: The output tensor of shape [N, L, C] after conversion. + """ + assert len(x.shape) == 4 + return x.flatten(2).transpose(1, 2).contiguous() + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. 
+ Default: 1 + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". + Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchEmbed(nn.Module): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The config dict for embedding + conv layer type selection. Default: "Conv2d. + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: None (Would be set as `kernel_size`). + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only work when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
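The `AdaptivePadding` docstring example can be verified directly. A minimal check, assuming the module is importable as `pcdet.models.model_utils.swin_utils` (its path in this patch):

```python
import torch
from pcdet.models.model_utils.swin_utils import AdaptivePadding

adap_pad = AdaptivePadding(kernel_size=16, stride=16, dilation=1, padding='corner')
x = torch.rand(1, 1, 15, 17)

print(adap_pad.get_pad_shape(x.shape[-2:]))   # (1, 15) rows/cols of zero padding
print(adap_pad(x).shape)                      # torch.Size([1, 1, 16, 32])
```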
+ """ + + def __init__( + self, + in_channels=3, + embed_dims=768, + conv_type='Conv2d', + kernel_size=16, + stride=16, + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None, + ): + super(PatchEmbed, self).__init__() + self._is_init = False + + self.init_cfg = copy.deepcopy(init_cfg) + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adap_padding = None + padding = to_2tuple(padding) + + self.projection = nn.Conv2d( + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = nn.LayerNorm(embed_dims) + else: + self.norm = None + + if input_size: + input_size = to_2tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adap_padding: + pad_h, pad_w = self.adap_padding.get_pad_shape(input_size) + input_h, input_w = input_size + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html + h_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + self.init_out_size = (h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adap_padding: + x = self.adap_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +class PatchMerging(nn.Module): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified.. + Default: True. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. 
+ norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__() + self._is_init = False + self.init_cfg = copy.deepcopy(init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = nn.LayerNorm(sample_dim) + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size + + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + + Args: + x (Tensor): The tensor to do the + inverse. + eps (float): EPS avoid numerical + overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. 
+ """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + + +def swin_converter(ckpt): + + new_ckpt = OrderedDict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + else: + new_v = v + new_k = k + + new_ckpt['backbone.' + new_k] = new_v + + return new_ckpt diff --git a/toolbox/openpcdet/pcdet/models/model_utils/transfusion_utils.py b/toolbox/openpcdet/pcdet/models/model_utils/transfusion_utils.py new file mode 100644 index 000000000..677827cba --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/model_utils/transfusion_utils.py @@ -0,0 +1,102 @@ +import torch +from torch import nn +import torch.nn.functional as F + +def clip_sigmoid(x, eps=1e-4): + y = torch.clamp(x.sigmoid_(), min=eps, max=1 - eps) + return y + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. 
+ """ + + def __init__(self, input_channel, num_pos_feats=288): + super().__init__() + self.position_embedding_head = nn.Sequential( + nn.Conv1d(input_channel, num_pos_feats, kernel_size=1), + nn.BatchNorm1d(num_pos_feats), + nn.ReLU(inplace=True), + nn.Conv1d(num_pos_feats, num_pos_feats, kernel_size=1)) + + def forward(self, xyz): + xyz = xyz.transpose(1, 2).contiguous() + position_embedding = self.position_embedding_head(xyz) + return position_embedding + + +class TransformerDecoderLayer(nn.Module): + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", + self_posembed=None, cross_posembed=None, cross_only=False): + super().__init__() + self.cross_only = cross_only + if not self.cross_only: + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") + + self.activation = _get_activation_fn(activation) + + self.self_posembed = self_posembed + self.cross_posembed = cross_posembed + + def with_pos_embed(self, tensor, pos_embed): + return tensor if pos_embed is None else tensor + pos_embed + + def forward(self, query, key, query_pos, key_pos, key_padding_mask=None, attn_mask=None): + # NxCxP to PxNxC + if self.self_posembed is not None: + query_pos_embed = self.self_posembed(query_pos).permute(2, 0, 1) + else: + query_pos_embed = None + if self.cross_posembed is not None: + key_pos_embed = self.cross_posembed(key_pos).permute(2, 0, 1) + else: + key_pos_embed = None + + query = query.permute(2, 0, 1) + key = key.permute(2, 0, 1) + + if not self.cross_only: + q = k = v = self.with_pos_embed(query, query_pos_embed) + query2 = self.self_attn(q, k, value=v)[0] + query = query + self.dropout1(query2) + query = self.norm1(query) + + query2 = self.multihead_attn(query=self.with_pos_embed(query, query_pos_embed), + key=self.with_pos_embed(key, key_pos_embed), + value=self.with_pos_embed(key, key_pos_embed), + key_padding_mask=key_padding_mask, attn_mask=attn_mask)[0] + + query = query + self.dropout2(query2) + query = self.norm2(query) + + query2 = self.linear2(self.dropout(self.activation(self.linear1(query)))) + query = query + self.dropout3(query2) + query = self.norm3(query) + + # NxCxP to PxNxC + query = query.permute(1, 2, 0) + return query + diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/__init__.py b/toolbox/openpcdet/pcdet/models/roi_heads/__init__.py new file mode 100644 index 000000000..693cec426 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/__init__.py @@ -0,0 +1,19 @@ +from .partA2_head import PartA2FCHead +from .pointrcnn_head import PointRCNNHead +from .pvrcnn_head import PVRCNNHead +from .second_head import SECONDHead +from .voxelrcnn_head import VoxelRCNNHead +from .roi_head_template import RoIHeadTemplate +from .mppnet_head import MPPNetHead 
+from .mppnet_memory_bank_e2e import MPPNetHeadE2E + +__all__ = { + 'RoIHeadTemplate': RoIHeadTemplate, + 'PartA2FCHead': PartA2FCHead, + 'PVRCNNHead': PVRCNNHead, + 'SECONDHead': SECONDHead, + 'PointRCNNHead': PointRCNNHead, + 'VoxelRCNNHead': VoxelRCNNHead, + 'MPPNetHead': MPPNetHead, + 'MPPNetHeadE2E': MPPNetHeadE2E, +} diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/mppnet_head.py b/toolbox/openpcdet/pcdet/models/roi_heads/mppnet_head.py new file mode 100644 index 000000000..909b9c732 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/mppnet_head.py @@ -0,0 +1,992 @@ +from typing import ValuesView +import torch.nn as nn +import torch +import numpy as np +import copy +import torch.nn.functional as F +from pcdet.ops.iou3d_nms import iou3d_nms_utils +from ...utils import common_utils, loss_utils +from .roi_head_template import RoIHeadTemplate +from ..model_utils.mppnet_utils import build_transformer, PointNet, MLP +from .target_assigner.proposal_target_layer import ProposalTargetLayer +from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules + + +class ProposalTargetLayerMPPNet(ProposalTargetLayer): + def __init__(self, roi_sampler_cfg): + super().__init__(roi_sampler_cfg = roi_sampler_cfg) + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + roi_scores: (B, num_rois) + gt_boxes: (B, N, 7 + C + 1) + roi_labels: (B, num_rois) + Returns: + batch_dict: + rois: (B, M, 7 + C) + gt_of_rois: (B, M, 7 + C) + gt_iou_of_rois: (B, M) + roi_scores: (B, M) + roi_labels: (B, M) + reg_valid_mask: (B, M) + rcnn_cls_labels: (B, M) + """ + + batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels, \ + batch_trajectory_rois,batch_valid_length = self.sample_rois_for_mppnet(batch_dict=batch_dict) + + # regression valid mask + reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long() + + # classification label + if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls': + batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long() + ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \ + (batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH) + batch_cls_labels[ignore_mask > 0] = -1 + elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou': + iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH + iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH + fg_mask = batch_roi_ious > iou_fg_thresh + bg_mask = batch_roi_ious < iou_bg_thresh + interval_mask = (fg_mask == 0) & (bg_mask == 0) + + batch_cls_labels = (fg_mask > 0).float() + batch_cls_labels[interval_mask] = \ + (batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh) + else: + raise NotImplementedError + + + targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, + 'gt_iou_of_rois': batch_roi_ious,'roi_scores': batch_roi_scores, + 'roi_labels': batch_roi_labels,'reg_valid_mask': reg_valid_mask, + 'rcnn_cls_labels': batch_cls_labels,'trajectory_rois':batch_trajectory_rois, + 'valid_length': batch_valid_length, + } + + return targets_dict + + def sample_rois_for_mppnet(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + roi_scores: (B, num_rois) + gt_boxes: (B, N, 7 + C + 1) + roi_labels: (B, num_rois) + Returns: + """ + cur_frame_idx = 0 + batch_size = batch_dict['batch_size'] + rois = batch_dict['trajectory_rois'][:,cur_frame_idx,:,:] + roi_scores = batch_dict['roi_scores'][:,:,cur_frame_idx] + roi_labels = 
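A note on the `roi_iou` branch above: classification targets are soft labels, linearly interpolated between the background and foreground IoU thresholds. A quick check with illustrative thresholds of 0.25 / 0.75 (the real values come from `TARGET_CONFIG`):

```python
import torch

iou_bg_thresh, iou_fg_thresh = 0.25, 0.75
ious = torch.tensor([0.1, 0.25, 0.5, 0.75, 0.9])

fg_mask = ious > iou_fg_thresh
bg_mask = ious < iou_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)

labels = (fg_mask > 0).float()
labels[interval_mask] = (ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
print(labels)   # tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])
```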
batch_dict['roi_labels'] + gt_boxes = batch_dict['gt_boxes'] + + code_size = rois.shape[-1] + batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size) + batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, gt_boxes.shape[-1]) + batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE) + batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE) + batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long) + + + + trajectory_rois = batch_dict['trajectory_rois'] + batch_trajectory_rois = rois.new_zeros(batch_size, trajectory_rois.shape[1],self.roi_sampler_cfg.ROI_PER_IMAGE,trajectory_rois.shape[-1]) + + valid_length = batch_dict['valid_length'] + batch_valid_length = rois.new_zeros((batch_size, batch_dict['trajectory_rois'].shape[1], self.roi_sampler_cfg.ROI_PER_IMAGE)) + + for index in range(batch_size): + + cur_trajectory_rois = trajectory_rois[index] + + cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = rois[index],gt_boxes[index], roi_labels[index], roi_scores[index] + + if 'valid_length' in batch_dict.keys(): + cur_valid_length = valid_length[index] + + + + k = cur_gt.__len__() - 1 + while k > 0 and cur_gt[k].sum() == 0: + k -= 1 + + cur_gt = cur_gt[:k + 1] + cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt + + if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False): + max_overlaps, gt_assignment = self.get_max_iou_with_same_class( + rois=cur_roi, roi_labels=cur_roi_labels, + gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long() + ) + + else: + iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N) + max_overlaps, gt_assignment = torch.max(iou3d, dim=1) + + sampled_inds,fg_inds, bg_inds = self.subsample_rois(max_overlaps=max_overlaps) + + batch_roi_labels[index] = cur_roi_labels[sampled_inds.long()] + + + if self.roi_sampler_cfg.get('USE_ROI_AUG',False): + + fg_rois, fg_iou3d = self.aug_roi_by_noise_torch(cur_roi[fg_inds], cur_gt[gt_assignment[fg_inds]], + max_overlaps[fg_inds], aug_times=self.roi_sampler_cfg.ROI_FG_AUG_TIMES) + bg_rois = cur_roi[bg_inds] + bg_iou3d = max_overlaps[bg_inds] + + batch_rois[index] = torch.cat([fg_rois,bg_rois],0) + batch_roi_ious[index] = torch.cat([fg_iou3d,bg_iou3d],0) + batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]] + + else: + batch_rois[index] = cur_roi[sampled_inds] + batch_roi_ious[index] = max_overlaps[sampled_inds] + batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]] + + + batch_roi_scores[index] = cur_roi_scores[sampled_inds] + + if 'valid_length' in batch_dict.keys(): + batch_valid_length[index] = cur_valid_length[:,sampled_inds] + + if self.roi_sampler_cfg.USE_TRAJ_AUG.ENABLED: + batch_trajectory_rois_list = [] + for idx in range(0,batch_dict['num_frames']): + if idx== cur_frame_idx: + batch_trajectory_rois_list.append(cur_trajectory_rois[cur_frame_idx:cur_frame_idx+1,sampled_inds]) + continue + fg_trajs, _ = self.aug_roi_by_noise_torch(cur_trajectory_rois[idx,fg_inds], cur_trajectory_rois[idx,fg_inds][:,:8], max_overlaps[fg_inds], \ + aug_times=self.roi_sampler_cfg.ROI_FG_AUG_TIMES,pos_thresh=self.roi_sampler_cfg.USE_TRAJ_AUG.THRESHOD) + bg_trajs = cur_trajectory_rois[idx,bg_inds] + batch_trajectory_rois_list.append(torch.cat([fg_trajs,bg_trajs],0)[None,:,:]) + batch_trajectory_rois[index] = torch.cat(batch_trajectory_rois_list,0) + else: + batch_trajectory_rois[index] = cur_trajectory_rois[:,sampled_inds] + + return 
batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels, batch_trajectory_rois,batch_valid_length + + def subsample_rois(self, max_overlaps): + # sample fg, easy_bg, hard_bg + fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)) + fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH) + + fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1) + easy_bg_inds = ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1) + hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) & + (max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1) + + fg_num_rois = fg_inds.numel() + bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel() + + if fg_num_rois > 0 and bg_num_rois > 0: + # sampling fg + fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) + + rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long() + fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] + + # sampling bg + bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image + bg_inds = self.sample_bg_inds( + hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO + ) + + elif fg_num_rois > 0 and bg_num_rois == 0: + # sampling fg + rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois) + rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long() + fg_inds = fg_inds[rand_num] + bg_inds = torch.tensor([]).type_as(fg_inds) + + elif bg_num_rois > 0 and fg_num_rois == 0: + # sampling bg + bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE + bg_inds = self.sample_bg_inds( + hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO + ) + else: + print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item())) + print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois)) + raise NotImplementedError + + sampled_inds = torch.cat((fg_inds, bg_inds), dim=0) + return sampled_inds.long(), fg_inds.long(), bg_inds.long() + + def aug_roi_by_noise_torch(self,roi_boxes3d, gt_boxes3d, iou3d_src, aug_times=10, pos_thresh=None): + iou_of_rois = torch.zeros(roi_boxes3d.shape[0]).type_as(gt_boxes3d) + if pos_thresh is None: + pos_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH) + + for k in range(roi_boxes3d.shape[0]): + temp_iou = cnt = 0 + roi_box3d = roi_boxes3d[k] + + gt_box3d = gt_boxes3d[k].view(1, gt_boxes3d.shape[-1]) + aug_box3d = roi_box3d + keep = True + while temp_iou < pos_thresh and cnt < aug_times: + if np.random.rand() <= self.roi_sampler_cfg.RATIO: + aug_box3d = roi_box3d # p=RATIO to keep the original roi box + keep = True + else: + aug_box3d = self.random_aug_box3d(roi_box3d) + keep = False + aug_box3d = aug_box3d.view((1, aug_box3d.shape[-1])) + iou3d = iou3d_nms_utils.boxes_iou3d_gpu(aug_box3d[:,:7], gt_box3d[:,:7]) + temp_iou = iou3d[0][0] + cnt += 1 + roi_boxes3d[k] = aug_box3d.view(-1) + if cnt == 0 or keep: + iou_of_rois[k] = iou3d_src[k] + else: + iou_of_rois[k] = temp_iou + return roi_boxes3d, iou_of_rois + + def random_aug_box3d(self,box3d): + """ + :param box3d: (7) [x, y, z, h, w, l, ry] + random shift, scale, orientation + """ + + if self.roi_sampler_cfg.REG_AUG_METHOD == 'single': + pos_shift = (torch.rand(3, device=box3d.device) - 0.5) # [-0.5 ~ 0.5] + hwl_scale = (torch.rand(3, device=box3d.device) - 0.5) / (0.5 / 0.15) + 1.0 # + 
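The sampler above always returns exactly `ROI_PER_IMAGE` proposals: the foreground quota is `round(FG_RATIO * ROI_PER_IMAGE)` and background fills whatever remains. A sketch of that bookkeeping with stand-in config values:

```python
import numpy as np
import torch

ROI_PER_IMAGE, FG_RATIO = 128, 0.5
REG_FG_THRESH, CLS_FG_THRESH = 0.55, 0.75

max_overlaps = torch.rand(512)                # stand-in RoI/GT IoUs
fg_thresh = min(REG_FG_THRESH, CLS_FG_THRESH) # 0.55, as in subsample_rois

fg_quota = int(np.round(FG_RATIO * ROI_PER_IMAGE))            # 64 fg slots
fg_inds = (max_overlaps >= fg_thresh).nonzero().view(-1)
fg_this_image = min(fg_quota, fg_inds.numel())
bg_this_image = ROI_PER_IMAGE - fg_this_image                 # rest is bg
print(fg_this_image, bg_this_image)           # always sums to 128
```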
angle_rot = (torch.rand(1, device=box3d.device) - 0.5) / (0.5 / (np.pi / 12)) # [-pi/12 ~ pi/12] + aug_box3d = torch.cat([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale, box3d[6:7] + angle_rot, box3d[7:]], dim=0) + return aug_box3d + elif self.roi_sampler_cfg.REG_AUG_METHOD == 'multiple': + # pos_range, hwl_range, angle_range, mean_iou + range_config = [[0.2, 0.1, np.pi / 12, 0.7], + [0.3, 0.15, np.pi / 12, 0.6], + [0.5, 0.15, np.pi / 9, 0.5], + [0.8, 0.15, np.pi / 6, 0.3], + [1.0, 0.15, np.pi / 3, 0.2]] + idx = torch.randint(low=0, high=len(range_config), size=(1,))[0].long() + + pos_shift = ((torch.rand(3, device=box3d.device) - 0.5) / 0.5) * range_config[idx][0] + hwl_scale = ((torch.rand(3, device=box3d.device) - 0.5) / 0.5) * range_config[idx][1] + 1.0 + angle_rot = ((torch.rand(1, device=box3d.device) - 0.5) / 0.5) * range_config[idx][2] + + aug_box3d = torch.cat([box3d[0:3] + pos_shift, box3d[3:6] * hwl_scale, box3d[6:7] + angle_rot], dim=0) + return aug_box3d + elif self.roi_sampler_cfg.REG_AUG_METHOD == 'normal': + x_shift = np.random.normal(loc=0, scale=0.3) + y_shift = np.random.normal(loc=0, scale=0.2) + z_shift = np.random.normal(loc=0, scale=0.3) + h_shift = np.random.normal(loc=0, scale=0.25) + w_shift = np.random.normal(loc=0, scale=0.15) + l_shift = np.random.normal(loc=0, scale=0.5) + ry_shift = ((torch.rand() - 0.5) / 0.5) * np.pi / 12 + + aug_box3d = np.array([box3d[0] + x_shift, box3d[1] + y_shift, box3d[2] + z_shift, box3d[3] + h_shift, + box3d[4] + w_shift, box3d[5] + l_shift, box3d[6] + ry_shift], dtype=np.float32) + aug_box3d = torch.from_numpy(aug_box3d).type_as(box3d) + return aug_box3d + else: + raise NotImplementedError + +class MPPNetHead(RoIHeadTemplate): + def __init__(self,model_cfg, num_class=1,**kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + self.proposal_target_layer = ProposalTargetLayerMPPNet(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG) + self.use_time_stamp = self.model_cfg.get('USE_TIMESTAMP',None) + self.num_lidar_points = self.model_cfg.Transformer.num_lidar_points + self.avg_stage1_score = self.model_cfg.get('AVG_STAGE1_SCORE', None) + + self.nhead = model_cfg.Transformer.nheads + self.num_enc_layer = model_cfg.Transformer.enc_layers + hidden_dim = model_cfg.TRANS_INPUT + self.hidden_dim = model_cfg.TRANS_INPUT + self.num_groups = model_cfg.Transformer.num_groups + + self.grid_size = model_cfg.ROI_GRID_POOL.GRID_SIZE + self.num_proxy_points = model_cfg.Transformer.num_proxy_points + self.seqboxembed = PointNet(8,model_cfg=self.model_cfg) + self.jointembed = MLP(self.hidden_dim*(self.num_groups+1), model_cfg.Transformer.hidden_dim, self.box_coder.code_size * self.num_class, 4) + + + num_radius = len(self.model_cfg.ROI_GRID_POOL.POOL_RADIUS) + self.up_dimension_geometry = MLP(input_dim = 29, hidden_dim = 64, output_dim =hidden_dim//num_radius, num_layers = 3) + self.up_dimension_motion = MLP(input_dim = 30, hidden_dim = 64, output_dim = hidden_dim, num_layers = 3) + + self.transformer = build_transformer(model_cfg.Transformer) + + self.roi_grid_pool_layer = pointnet2_stack_modules.StackSAModuleMSG( + radii=self.model_cfg.ROI_GRID_POOL.POOL_RADIUS, + nsamples=self.model_cfg.ROI_GRID_POOL.NSAMPLE, + mlps=self.model_cfg.ROI_GRID_POOL.MLPS, + use_xyz=True, + pool_method=self.model_cfg.ROI_GRID_POOL.POOL_METHOD, + ) + + self.class_embed = nn.ModuleList() + self.class_embed.append(nn.Linear(model_cfg.Transformer.hidden_dim, 1)) + + self.bbox_embed = nn.ModuleList() + for _ in range(self.num_groups): + 
self.bbox_embed.append(MLP(model_cfg.Transformer.hidden_dim, model_cfg.Transformer.hidden_dim, self.box_coder.code_size * self.num_class, 4)) + + if self.model_cfg.Transformer.use_grid_pos.enabled: + if self.model_cfg.Transformer.use_grid_pos.init_type == 'index': + self.grid_index = torch.cat([i.reshape(-1,1)for i in torch.meshgrid(torch.arange(self.grid_size), torch.arange(self.grid_size), torch.arange(self.grid_size))],1).float().cuda() + self.grid_pos_embeded = MLP(input_dim = 3, hidden_dim = 256, output_dim = hidden_dim, num_layers = 2) + else: + self.pos = nn.Parameter(torch.zeros(1, self.num_grid_points, 256)) + + def init_weights(self, weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + nn.init.normal_(self.bbox_embed.layers[-1].weight, mean=0, std=0.001) + + def get_corner_points_of_roi(self, rois): + rois = rois.view(-1, rois.shape[-1]) + batch_size_rcnn = rois.shape[0] + + local_roi_grid_points = self.get_corner_points(rois, batch_size_rcnn) + local_roi_grid_points = common_utils.rotate_points_along_z( + local_roi_grid_points.clone(), rois[:, 6] + ).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + + global_roi_grid_points = local_roi_grid_points + global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points + + @staticmethod + def get_dense_grid_points(rois, batch_size_rcnn, grid_size): + faked_features = rois.new_ones((grid_size, grid_size, grid_size)) + dense_idx = faked_features.nonzero() + dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() + + local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] + roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \ + - (local_roi_size.unsqueeze(dim=1) / 2) + return roi_grid_points + + @staticmethod + def get_corner_points(rois, batch_size_rcnn): + faked_features = rois.new_ones((2, 2, 2)) + + dense_idx = faked_features.nonzero() + dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() + + local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] + roi_grid_points = dense_idx * local_roi_size.unsqueeze(dim=1) \ + - (local_roi_size.unsqueeze(dim=1) / 2) + return roi_grid_points + + def roi_grid_pool(self, batch_size, rois, point_coords, point_features,batch_dict=None,batch_cnt=None): + + num_frames = batch_dict['num_frames'] + num_rois = rois.shape[2]*rois.shape[1] + + global_roi_proxy_points, local_roi_proxy_points = self.get_proxy_points_of_roi( + rois.permute(0,2,1,3).contiguous(), grid_size=self.grid_size + ) + + global_roi_proxy_points = global_roi_proxy_points.view(batch_size, -1, 3) + + + point_coords = point_coords.view(point_coords.shape[0]*num_frames,point_coords.shape[1]//num_frames,point_coords.shape[-1]) + xyz = point_coords[:, :, 0:3].view(-1,3) + + + num_points = point_coords.shape[1] + num_proxy_points = self.num_proxy_points + + if batch_cnt is None: + xyz_batch_cnt = torch.tensor([num_points]*num_rois*batch_size).cuda().int() + else: + xyz_batch_cnt = torch.tensor(batch_cnt).cuda().int() + + new_xyz_batch_cnt = torch.tensor([num_proxy_points]*num_rois*batch_size).cuda().int() + new_xyz = 
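`get_dense_grid_points` (and the 2x2x2 `get_corner_points` variant) enumerates grid cells via `nonzero()` on an all-ones tensor, then rescales the indices into box-local coordinates. The same arithmetic for a single RoI:

```python
import torch

grid_size = 3
rois = torch.tensor([[10.0, 2.0, -1.0, 4.0, 2.0, 1.5, 0.3]])  # (x,y,z,dx,dy,dz,ry)

faked = rois.new_ones((grid_size, grid_size, grid_size))
dense_idx = faked.nonzero().float()        # (27, 3): every (x,y,z) cell index
local_size = rois[:, 3:6]                  # box extents (dx, dy, dz)
grid_pts = (dense_idx + 0.5) / grid_size * local_size - local_size / 2

print(grid_pts.shape)           # torch.Size([27, 3]), cell centers
print(grid_pts.min(0).values)   # half a cell inside -local_size/2: symmetric grid
```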
global_roi_proxy_points.view(-1, 3) + + _, pooled_features = self.roi_grid_pool_layer( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz, + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features.view(-1,point_features.shape[-1]).contiguous(), + ) + + features = pooled_features.view( + point_features.shape[0], num_frames*self.num_proxy_points, + pooled_features.shape[-1]).contiguous() + + return features,global_roi_proxy_points.view(batch_size*rois.shape[2], num_frames*num_proxy_points,3).contiguous() + + def get_proxy_points_of_roi(self, rois, grid_size): + rois = rois.view(-1, rois.shape[-1]) + batch_size_rcnn = rois.shape[0] + + local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) + local_roi_grid_points = common_utils.rotate_points_along_z(local_roi_grid_points.clone(), rois[:, 6]).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + global_roi_grid_points = local_roi_grid_points + global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points + + def spherical_coordinate(self, src, diag_dist): + assert (src.shape[-1] == 27) + device = src.device + indices_x = torch.LongTensor([0,3,6,9,12,15,18,21,24]).to(device) # + indices_y = torch.LongTensor([1,4,7,10,13,16,19,22,25]).to(device) # + indices_z = torch.LongTensor([2,5,8,11,14,17,20,23,26]).to(device) + src_x = torch.index_select(src, -1, indices_x) + src_y = torch.index_select(src, -1, indices_y) + src_z = torch.index_select(src, -1, indices_z) + dis = (src_x ** 2 + src_y ** 2 + src_z ** 2) ** 0.5 + phi = torch.atan(src_y / (src_x + 1e-5)) + the = torch.acos(src_z / (dis + 1e-5)) + dis = dis / (diag_dist + 1e-5) + src = torch.cat([dis, phi, the], dim = -1) + return src + + def crop_current_frame_points(self, src, batch_size,trajectory_rois,num_rois,batch_dict): + + for bs_idx in range(batch_size): + cur_batch_boxes = trajectory_rois[bs_idx,0,:,:7].view(-1,7) + cur_radiis = torch.sqrt((cur_batch_boxes[:,3]/2) ** 2 + (cur_batch_boxes[:,4]/2) ** 2) * 1.1 + cur_points = batch_dict['points'][(batch_dict['points'][:, 0] == bs_idx)][:,1:] + dis = torch.norm((cur_points[:,:2].unsqueeze(0) - cur_batch_boxes[:,:2].unsqueeze(1).repeat(1,cur_points.shape[0],1)), dim = 2) + point_mask = (dis <= cur_radiis.unsqueeze(-1)) + + + sampled_idx = torch.topk(point_mask.float(),128)[1] + sampled_idx_buffer = sampled_idx[:, 0:1].repeat(1, 128) + roi_idx = torch.arange(num_rois)[:, None].repeat(1, 128) + sampled_mask = point_mask[roi_idx, sampled_idx] + sampled_idx_buffer[sampled_mask] = sampled_idx[sampled_mask] + + src[bs_idx] = cur_points[sampled_idx_buffer][:,:,:5] + empty_flag = sampled_mask.sum(-1)==0 + src[bs_idx,empty_flag] = 0 + + src = src.repeat([1,1,trajectory_rois.shape[1],1]) + + return src + + def crop_previous_frame_points(self,src,batch_size,trajectory_rois,num_rois,valid_length,batch_dict): + for bs_idx in range(batch_size): + + cur_points = batch_dict['points'][(batch_dict['points'][:, 0] == bs_idx)][:,1:] + + + for idx in range(1,trajectory_rois.shape[1]): + + time_mask = (cur_points[:,-1] - idx*0.1).abs() < 1e-3 + cur_time_points = cur_points[time_mask] + cur_batch_boxes = trajectory_rois[bs_idx,idx,:,:7].view(-1,7) + + cur_radiis = torch.sqrt((cur_batch_boxes[:,3]/2) ** 2 + (cur_batch_boxes[:,4]/2) ** 2) * 1.1 + if not self.training and cur_batch_boxes.shape[0] > 32: + length_iter= cur_batch_boxes.shape[0]//32 + dis_list = [] + for i in range(length_iter+1): + dis = torch.norm((cur_time_points[:,:2].unsqueeze(0) - \ + 
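`spherical_coordinate` above converts each corner/center offset into a normalized (distance, azimuth, inclination) triple. The per-point math, written out for one offset vector:

```python
import torch

offset = torch.tensor([1.0, 1.0, 1.0])   # one (dx, dy, dz) offset
diag_dist = torch.tensor(3.0)            # box diagonal used for normalization

dis = offset.norm()                               # sqrt(3) ~ 1.732
phi = torch.atan(offset[1] / (offset[0] + 1e-5))  # azimuth in xy-plane, ~pi/4
the = torch.acos(offset[2] / (dis + 1e-5))        # inclination from +z, ~0.955

print(dis / (diag_dist + 1e-5), phi, the)  # distance is scale-normalized, ~0.577
```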
cur_batch_boxes[32*i:32*(i+1),:2].unsqueeze(1).repeat(1,cur_time_points.shape[0],1)), dim = 2) +
dis_list.append(dis) +
dis = torch.cat(dis_list,0) +
else: +
dis = torch.norm((cur_time_points[:,:2].unsqueeze(0) - \ +
cur_batch_boxes[:,:2].unsqueeze(1).repeat(1,cur_time_points.shape[0],1)), dim = 2) + +
point_mask = (dis <= cur_radiis.unsqueeze(-1)).view(trajectory_rois.shape[2],-1) + +
for roi_box_idx in range(0, num_rois): + +
if not valid_length[bs_idx,idx,roi_box_idx]: +
continue + +
cur_roi_points = cur_time_points[point_mask[roi_box_idx]] + +
if cur_roi_points.shape[0] > self.num_lidar_points: +
# fixed seed keeps the per-RoI point sampling deterministic across runs +
np.random.seed(0) +
choice = np.random.choice(cur_roi_points.shape[0], self.num_lidar_points, replace=True) +
cur_roi_points_sample = cur_roi_points[choice] + +
elif cur_roi_points.shape[0] == 0: +
cur_roi_points_sample = cur_roi_points.new_zeros(self.num_lidar_points, 6) + +
else: +
# pad up to num_lidar_points by repeating the first point of the RoI +
empty_num = self.num_lidar_points - cur_roi_points.shape[0] +
add_points = cur_roi_points[0].repeat(empty_num, 1) +
cur_roi_points_sample = torch.cat([cur_roi_points, add_points], dim=0) + +
if not self.use_time_stamp: +
cur_roi_points_sample = cur_roi_points_sample[:,:-1] + +
src[bs_idx, roi_box_idx, self.num_lidar_points*idx:self.num_lidar_points*(idx+1), :] = cur_roi_points_sample + + +
return src + + +
def get_proposal_aware_geometry_feature(self, src, batch_size, trajectory_rois, num_rois, batch_dict): +
proposal_aware_feat_list = [] +
for i in range(trajectory_rois.shape[1]): + +
corner_points, _ = self.get_corner_points_of_roi(trajectory_rois[:,i,:,:].contiguous()) + +
corner_points = corner_points.view(batch_size, num_rois, -1, corner_points.shape[-1]) +
corner_points = corner_points.view(batch_size * num_rois, -1) +
trajectory_roi_center = trajectory_rois[:,i,:,:].contiguous().reshape(batch_size * num_rois, -1)[:,:3] +
corner_add_center_points = torch.cat([corner_points, trajectory_roi_center], dim = -1) +
proposal_aware_feat = src[:,i*self.num_lidar_points:(i+1)*self.num_lidar_points,:3].repeat(1,1,9) - \ +
corner_add_center_points.unsqueeze(1).repeat(1,self.num_lidar_points,1) + +
lwh = trajectory_rois[:,i,:,:].reshape(batch_size * num_rois, -1)[:,3:6].unsqueeze(1).repeat(1,proposal_aware_feat.shape[1],1) +
diag_dist = (lwh[:,:,0]**2 + lwh[:,:,1]**2 + lwh[:,:,2]**2) ** 0.5 +
proposal_aware_feat = self.spherical_coordinate(proposal_aware_feat, diag_dist = diag_dist.unsqueeze(-1)) +
proposal_aware_feat_list.append(proposal_aware_feat) + +
proposal_aware_feat = torch.cat(proposal_aware_feat_list,dim=1) +
proposal_aware_feat = torch.cat([proposal_aware_feat, src[:,:,3:]], dim = -1) +
src_geometry = self.up_dimension_geometry(proposal_aware_feat) +
proxy_point_geometry, proxy_points = self.roi_grid_pool(batch_size, trajectory_rois, src, src_geometry, batch_dict, batch_cnt=None) +
return proxy_point_geometry, proxy_points + + + +
def get_proposal_aware_motion_feature(self, proxy_point, batch_size, trajectory_rois, num_rois, batch_dict): + + +
time_stamp = torch.ones([proxy_point.shape[0],proxy_point.shape[1],1]).cuda() +
padding_zero = torch.zeros([proxy_point.shape[0],proxy_point.shape[1],2]).cuda() +
proxy_point_time_padding = torch.cat([padding_zero,time_stamp],-1) + +
num_frames = trajectory_rois.shape[1] + +
for i in range(num_frames): +
proxy_point_time_padding[:,i*self.num_proxy_points:(i+1)*self.num_proxy_points,-1] = i*0.1 + + +
corner_points, _ = self.get_corner_points_of_roi(trajectory_rois[:,0,:,:].contiguous()) +
corner_points = corner_points.view(batch_size,
num_rois, -1, corner_points.shape[-1]) + corner_points = corner_points.view(batch_size * num_rois, -1) + trajectory_roi_center = trajectory_rois[:,0,:,:].reshape(batch_size * num_rois, -1)[:,:3] + corner_add_center_points = torch.cat([corner_points, trajectory_roi_center], dim = -1) + + proposal_aware_feat = proxy_point[:,:,:3].repeat(1,1,9) - corner_add_center_points.unsqueeze(1) + + lwh = trajectory_rois[:,0,:,:].reshape(batch_size * num_rois, -1)[:,3:6].unsqueeze(1).repeat(1,proxy_point.shape[1],1) + diag_dist = (lwh[:,:,0]**2 + lwh[:,:,1]**2 + lwh[:,:,2]**2) ** 0.5 + proposal_aware_feat = self.spherical_coordinate(proposal_aware_feat, diag_dist = diag_dist.unsqueeze(-1)) + + + proposal_aware_feat = torch.cat([proposal_aware_feat,proxy_point_time_padding],-1) + proxy_point_motion_feat = self.up_dimension_motion(proposal_aware_feat) + + return proxy_point_motion_feat + + def trajectories_auxiliary_branch(self,trajectory_rois): + + time_stamp = torch.ones([trajectory_rois.shape[0],trajectory_rois.shape[1],trajectory_rois.shape[2],1]).cuda() + for i in range(time_stamp.shape[1]): + time_stamp[:,i,:] = i*0.1 + + box_seq = torch.cat([trajectory_rois[:,:,:,:7],time_stamp],-1) + + box_seq[:, :, :,0:3] = box_seq[:, :, :,0:3] - box_seq[:, 0:1, :, 0:3] + + roi_ry = box_seq[:,:,:,6] % (2 * np.pi) + roi_ry_t0 = roi_ry[:,0] + roi_ry_t0 = roi_ry_t0.repeat(1,box_seq.shape[1]) + + + box_seq = common_utils.rotate_points_along_z( + points=box_seq.view(-1, 1, box_seq.shape[-1]), angle=-roi_ry_t0.view(-1) + ).view(box_seq.shape[0],box_seq.shape[1], -1, box_seq.shape[-1]) + + box_seq[:, :, :, 6] = 0 + + batch_rcnn = box_seq.shape[0]*box_seq.shape[2] + + box_reg, box_feat, _ = self.seqboxembed(box_seq.permute(0,2,3,1).contiguous().view(batch_rcnn,box_seq.shape[-1],box_seq.shape[1])) + + return box_reg, box_feat + + def generate_trajectory(self,cur_batch_boxes,proposals_list,batch_dict): + + trajectory_rois = cur_batch_boxes[:,None,:,:].repeat(1,batch_dict['rois'].shape[-2],1,1) + trajectory_rois[:,0,:,:]= cur_batch_boxes + valid_length = torch.zeros([batch_dict['batch_size'],batch_dict['rois'].shape[-2],trajectory_rois.shape[2]]) + valid_length[:,0] = 1 + num_frames = batch_dict['rois'].shape[-2] + for i in range(1,num_frames): + frame = torch.zeros_like(cur_batch_boxes) + frame[:,:,0:2] = trajectory_rois[:,i-1,:,0:2] + trajectory_rois[:,i-1,:,7:9] + frame[:,:,2:] = trajectory_rois[:,i-1,:,2:] + + for bs_idx in range( batch_dict['batch_size']): + iou3d = iou3d_nms_utils.boxes_iou3d_gpu(frame[bs_idx,:,:7], proposals_list[bs_idx,i,:,:7]) + max_overlaps, traj_assignment = torch.max(iou3d, dim=1) + + fg_inds = ((max_overlaps >= 0.5)).nonzero().view(-1) + + valid_length[bs_idx,i,fg_inds] = 1 + + trajectory_rois[bs_idx,i,fg_inds,:] = proposals_list[bs_idx,i,traj_assignment[fg_inds]] + + batch_dict['valid_length'] = valid_length + + return trajectory_rois,valid_length + + def forward(self, batch_dict): + """ + :param input_data: input dict + :return: + """ + + batch_dict['rois'] = batch_dict['proposals_list'].permute(0,2,1,3) + num_rois = batch_dict['rois'].shape[1] + batch_dict['num_frames'] = batch_dict['rois'].shape[2] + batch_dict['roi_scores'] = batch_dict['roi_scores'].permute(0,2,1) + batch_dict['roi_labels'] = batch_dict['roi_labels'][:,0,:].long() + proposals_list = batch_dict['proposals_list'] + batch_size = batch_dict['batch_size'] + cur_batch_boxes = copy.deepcopy(batch_dict['rois'].detach())[:,:,0] + batch_dict['cur_frame_idx'] = 0 + + trajectory_rois,valid_length = 
self.generate_trajectory(cur_batch_boxes,proposals_list,batch_dict) + + batch_dict['traj_memory'] = trajectory_rois + batch_dict['has_class_labels'] = True + batch_dict['trajectory_rois'] = trajectory_rois + + if self.training: + targets_dict = self.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_scores'] = targets_dict['roi_scores'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + targets_dict['trajectory_rois'][:,batch_dict['cur_frame_idx'],:,:] = batch_dict['rois'] + trajectory_rois = targets_dict['trajectory_rois'] + valid_length = targets_dict['valid_length'] + empty_mask = batch_dict['rois'][:,:,:6].sum(-1)==0 + + else: + empty_mask = batch_dict['rois'][:,:,0,:6].sum(-1)==0 + batch_dict['valid_traj_mask'] = ~empty_mask + + rois = batch_dict['rois'] + num_rois = batch_dict['rois'].shape[1] + num_sample = self.num_lidar_points + src = rois.new_zeros(batch_size, num_rois, num_sample, 5) + + src = self.crop_current_frame_points(src, batch_size, trajectory_rois, num_rois,batch_dict) + + src = self.crop_previous_frame_points(src, batch_size,trajectory_rois, num_rois,valid_length,batch_dict) + + src = src.view(batch_size * num_rois, -1, src.shape[-1]) + + src_geometry_feature,proxy_points = self.get_proposal_aware_geometry_feature(src,batch_size,trajectory_rois,num_rois,batch_dict) + + src_motion_feature = self.get_proposal_aware_motion_feature(proxy_points,batch_size,trajectory_rois,num_rois,batch_dict) + + src = src_geometry_feature + src_motion_feature + + box_reg, feat_box = self.trajectories_auxiliary_branch(trajectory_rois) + + if self.model_cfg.get('USE_TRAJ_EMPTY_MASK',None): + src[empty_mask.view(-1)] = 0 + + if self.model_cfg.Transformer.use_grid_pos.init_type == 'index': + pos = self.grid_pos_embeded(self.grid_index.cuda())[None,:,:] + pos = torch.cat([torch.zeros(1,1,self.hidden_dim).cuda(),pos],1) + else: + pos=None + + hs, tokens = self.transformer(src,pos=pos) + point_cls_list = [] + point_reg_list = [] + + for i in range(self.num_enc_layer): + point_cls_list.append(self.class_embed[0](tokens[i][0])) + + for i in range(hs.shape[0]): + for j in range(self.num_enc_layer): + point_reg_list.append(self.bbox_embed[i](tokens[j][i])) + + point_cls = torch.cat(point_cls_list,0) + + point_reg = torch.cat(point_reg_list,0) + hs = hs.permute(1,0,2).reshape(hs.shape[1],-1) + + joint_reg = self.jointembed(torch.cat([hs,feat_box],-1)) + + rcnn_cls = point_cls + rcnn_reg = joint_reg + + if not self.training: + batch_dict['rois'] = batch_dict['rois'][:,:,0].contiguous() + rcnn_cls = rcnn_cls[-rcnn_cls.shape[0]//self.num_enc_layer:] + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg + ) + + batch_dict['batch_box_preds'] = batch_box_preds + + batch_dict['cls_preds_normalized'] = False + if self.avg_stage1_score: + stage1_score = batch_dict['roi_scores'][:,:,:1] + batch_cls_preds = F.sigmoid(batch_cls_preds) + if self.model_cfg.get('IOU_WEIGHT', None): + batch_box_preds_list = [] + roi_labels_list = [] + batch_cls_preds_list = [] + for bs_idx in range(batch_size): + car_mask = batch_dict['roi_labels'][bs_idx] ==1 + batch_cls_preds_car = batch_cls_preds[bs_idx].pow(self.model_cfg.IOU_WEIGHT[0])* \ + stage1_score[bs_idx].pow(1-self.model_cfg.IOU_WEIGHT[0]) + batch_cls_preds_car = batch_cls_preds_car[car_mask][None] + batch_cls_preds_pedcyc = batch_cls_preds[bs_idx].pow(self.model_cfg.IOU_WEIGHT[1])* \ + 
stage1_score[bs_idx].pow(1-self.model_cfg.IOU_WEIGHT[1]) + batch_cls_preds_pedcyc = batch_cls_preds_pedcyc[~car_mask][None] + cls_preds = torch.cat([batch_cls_preds_car,batch_cls_preds_pedcyc],1) + box_preds = torch.cat([batch_dict['batch_box_preds'][bs_idx][car_mask], + batch_dict['batch_box_preds'][bs_idx][~car_mask]],0)[None] + roi_labels = torch.cat([batch_dict['roi_labels'][bs_idx][car_mask], + batch_dict['roi_labels'][bs_idx][~car_mask]],0)[None] + batch_box_preds_list.append(box_preds) + roi_labels_list.append(roi_labels) + batch_cls_preds_list.append(cls_preds) + batch_dict['batch_box_preds'] = torch.cat(batch_box_preds_list,0) + batch_dict['roi_labels'] = torch.cat(roi_labels_list,0) + batch_cls_preds = torch.cat(batch_cls_preds_list,0) + + else: + batch_cls_preds = torch.sqrt(batch_cls_preds*stage1_score) + batch_dict['cls_preds_normalized'] = True + + batch_dict['batch_cls_preds'] = batch_cls_preds + + + else: + targets_dict['batch_size'] = batch_size + targets_dict['rcnn_cls'] = rcnn_cls + targets_dict['rcnn_reg'] = rcnn_reg + targets_dict['box_reg'] = box_reg + targets_dict['point_reg'] = point_reg + targets_dict['point_cls'] = point_cls + self.forward_ret_dict = targets_dict + + return batch_dict + + def get_loss(self, tb_dict=None): + tb_dict = {} if tb_dict is None else tb_dict + rcnn_loss = 0 + rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict) + rcnn_loss += rcnn_loss_cls + tb_dict.update(cls_tb_dict) + + rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict) + rcnn_loss += rcnn_loss_reg + tb_dict.update(reg_tb_dict) + tb_dict['rcnn_loss'] = rcnn_loss.item() + return rcnn_loss, tb_dict + + def get_box_reg_layer_loss(self, forward_ret_dict): + loss_cfgs = self.model_cfg.LOSS_CONFIG + code_size = self.box_coder.code_size + reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1) + batch_size = forward_ret_dict['batch_size'] + + gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size] + gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size) + + rcnn_reg = forward_ret_dict['rcnn_reg'] + + roi_boxes3d = forward_ret_dict['rois'] + + rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0] + + fg_mask = (reg_valid_mask > 0) + fg_sum = fg_mask.long().sum().item() + + tb_dict = {} + + if loss_cfgs.REG_LOSS == 'smooth-l1': + + rois_anchor = roi_boxes3d.clone().detach()[:,:,:7].contiguous().view(-1, code_size) + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + reg_targets = self.box_coder.encode_torch( + gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor + ) + rcnn_loss_reg = self.reg_loss_func( + rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0), + reg_targets.unsqueeze(dim=0), + ) # [B, M, 7] + rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1) + rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']*loss_cfgs.LOSS_WEIGHTS['traj_reg_weight'][0] + + tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item() + + if self.model_cfg.USE_AUX_LOSS: + point_reg = forward_ret_dict['point_reg'] + + groups = point_reg.shape[0]//reg_targets.shape[0] + if groups != 1 : + point_loss_regs = 0 + slice = reg_targets.shape[0] + for i in range(groups): + point_loss_reg = self.reg_loss_func( + point_reg[i*slice:(i+1)*slice].view(slice, -1).unsqueeze(dim=0),reg_targets.unsqueeze(dim=0),) + point_loss_reg = (point_loss_reg.view(slice, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1) + point_loss_reg 
= point_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']*loss_cfgs.LOSS_WEIGHTS['traj_reg_weight'][2] + + point_loss_regs += point_loss_reg + point_loss_regs = point_loss_regs / groups + tb_dict['point_loss_reg'] = point_loss_regs.item() + rcnn_loss_reg += point_loss_regs + + else: + point_loss_reg = self.reg_loss_func(point_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),reg_targets.unsqueeze(dim=0),) + point_loss_reg = (point_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1) + point_loss_reg = point_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']*loss_cfgs.LOSS_WEIGHTS['traj_reg_weight'][2] + tb_dict['point_loss_reg'] = point_loss_reg.item() + rcnn_loss_reg += point_loss_reg + + seqbox_reg = forward_ret_dict['box_reg'] + seqbox_loss_reg = self.reg_loss_func(seqbox_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0),reg_targets.unsqueeze(dim=0),) + seqbox_loss_reg = (seqbox_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1) + seqbox_loss_reg = seqbox_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']*loss_cfgs.LOSS_WEIGHTS['traj_reg_weight'][1] + tb_dict['seqbox_loss_reg'] = seqbox_loss_reg.item() + rcnn_loss_reg += seqbox_loss_reg + + if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0: + + fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask] + fg_roi_boxes3d = roi_boxes3d[:,:,:7].contiguous().view(-1, code_size)[fg_mask] + + fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size) + batch_anchors = fg_roi_boxes3d.clone().detach() + roi_ry = fg_roi_boxes3d[:, :, 6].view(-1) + roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3) + batch_anchors[:, :, 0:3] = 0 + rcnn_boxes3d = self.box_coder.decode_torch( + fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors + ).view(-1, code_size) + + rcnn_boxes3d = common_utils.rotate_points_along_z( + rcnn_boxes3d.unsqueeze(dim=1), roi_ry + ).squeeze(dim=1) + rcnn_boxes3d[:, 0:3] += roi_xyz + + corner_loss_func = loss_utils.get_corner_loss_lidar + + loss_corner = corner_loss_func( + rcnn_boxes3d[:, 0:7], + gt_of_rois_src[fg_mask][:, 0:7]) + + loss_corner = loss_corner.mean() + loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight'] + + rcnn_loss_reg += loss_corner + tb_dict['rcnn_loss_corner'] = loss_corner.item() + + else: + raise NotImplementedError + + return rcnn_loss_reg, tb_dict + + def get_box_cls_layer_loss(self, forward_ret_dict): + loss_cfgs = self.model_cfg.LOSS_CONFIG + rcnn_cls = forward_ret_dict['rcnn_cls'] + rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1) + + if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy': + + rcnn_cls_flat = rcnn_cls.view(-1) + + groups = rcnn_cls_flat.shape[0] // rcnn_cls_labels.shape[0] + if groups != 1: + rcnn_loss_cls = 0 + slice = rcnn_cls_labels.shape[0] + for i in range(groups): + batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat[i*slice:(i+1)*slice]), + rcnn_cls_labels.float(), reduction='none') + + cls_valid_mask = (rcnn_cls_labels >= 0).float() + rcnn_loss_cls = rcnn_loss_cls + (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) + + rcnn_loss_cls = rcnn_loss_cls / groups + + else: + + batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none') + cls_valid_mask = (rcnn_cls_labels >= 0).float() + rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) + + + elif loss_cfgs.CLS_LOSS == 'CrossEntropy': + batch_loss_cls = 
F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1) + cls_valid_mask = (rcnn_cls_labels >= 0).float() + rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) + + else: + raise NotImplementedError + + rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight'] + + tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()} + return rcnn_loss_cls, tb_dict + + + def generate_predicted_boxes(self, batch_size, rois, cls_preds=None, box_preds=None): + """ + Args: + batch_size: + rois: (B, N, 7) + cls_preds: (BN, num_class) + box_preds: (BN, code_size) + Returns: + """ + code_size = self.box_coder.code_size + if cls_preds is not None: + batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1]) + else: + batch_cls_preds = None + batch_box_preds = box_preds.view(batch_size, -1, code_size) + + roi_ry = rois[:, :, 6].view(-1) + roi_xyz = rois[:, :, 0:3].view(-1, 3) + local_rois = rois.clone().detach() + local_rois[:, :, 0:3] = 0 + + batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size) + + batch_box_preds = common_utils.rotate_points_along_z( + batch_box_preds.unsqueeze(dim=1), roi_ry + ).squeeze(dim=1) + + batch_box_preds[:, 0:3] += roi_xyz + batch_box_preds = batch_box_preds.view(batch_size, -1, code_size) + batch_box_preds = torch.cat([batch_box_preds,rois[:,:,7:]],-1) + return batch_cls_preds, batch_box_preds diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/mppnet_memory_bank_e2e.py b/toolbox/openpcdet/pcdet/models/roi_heads/mppnet_memory_bank_e2e.py new file mode 100644 index 000000000..b8af7f37a --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/mppnet_memory_bank_e2e.py @@ -0,0 +1,581 @@ +from typing import ValuesView +import torch.nn as nn +import torch +import numpy as np +import copy +import torch.nn.functional as F +from pcdet.ops.iou3d_nms import iou3d_nms_utils +from ...utils import common_utils, loss_utils +from .roi_head_template import RoIHeadTemplate +from ..model_utils.mppnet_utils import build_transformer, PointNet, MLP +from .target_assigner.proposal_target_layer import ProposalTargetLayer +from pcdet.ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules + + +class MPPNetHeadE2E(RoIHeadTemplate): + def __init__(self,model_cfg, num_class=1,**kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + self.use_time_stamp = self.model_cfg.get('USE_TIMESTAMP',None) + self.num_lidar_points = self.model_cfg.Transformer.num_lidar_points + self.avg_stage1_score = self.model_cfg.get('AVG_STAGE1_SCORE', None) + + self.nhead = model_cfg.Transformer.nheads + self.num_enc_layer = model_cfg.Transformer.enc_layers + hidden_dim = model_cfg.TRANS_INPUT + self.hidden_dim = model_cfg.TRANS_INPUT + self.num_groups = model_cfg.Transformer.num_groups + + self.grid_size = model_cfg.ROI_GRID_POOL.GRID_SIZE + self.num_proxy_points = model_cfg.Transformer.num_proxy_points + + self.seqboxembed = PointNet(8,model_cfg=self.model_cfg) + self.jointembed = MLP(self.hidden_dim*(self.num_groups+1), model_cfg.Transformer.hidden_dim, self.box_coder.code_size * self.num_class, 4) + + num_radius = len(self.model_cfg.ROI_GRID_POOL.POOL_RADIUS) + self.up_dimension_geometry = MLP(input_dim = 29, hidden_dim = 64, output_dim =hidden_dim//num_radius, num_layers = 3) + self.up_dimension_motion = MLP(input_dim = 30, hidden_dim = 64, output_dim = hidden_dim, num_layers = 3) + + self.transformer = 
build_transformer(model_cfg.Transformer) + +
self.roi_grid_pool_layer = pointnet2_stack_modules.StackSAModuleMSG( +
radii=self.model_cfg.ROI_GRID_POOL.POOL_RADIUS, +
nsamples=self.model_cfg.ROI_GRID_POOL.NSAMPLE, +
mlps=self.model_cfg.ROI_GRID_POOL.MLPS, +
use_xyz=True, +
pool_method=self.model_cfg.ROI_GRID_POOL.POOL_METHOD, + ) + +
self.class_embed = nn.ModuleList() +
self.class_embed.append(nn.Linear(model_cfg.Transformer.hidden_dim, 1)) + +
self.bbox_embed = nn.ModuleList() +
for _ in range(self.num_groups): +
self.bbox_embed.append(MLP(model_cfg.Transformer.hidden_dim, model_cfg.Transformer.hidden_dim, self.box_coder.code_size * self.num_class, 4)) + +
if self.model_cfg.Transformer.use_grid_pos.enabled: +
if self.model_cfg.Transformer.use_grid_pos.init_type == 'index': +
self.grid_index = torch.cat([i.reshape(-1, 1) for i in torch.meshgrid(torch.arange(self.grid_size), torch.arange(self.grid_size), torch.arange(self.grid_size))], 1).float().cuda() +
self.grid_pos_embeded = MLP(input_dim = 3, hidden_dim = 256, output_dim = hidden_dim, num_layers = 2) +
else: +
# learned positional embedding (expects self.num_grid_points to be provided by the config) +
self.pos = nn.Parameter(torch.zeros(1, self.num_grid_points, 256)) + +
def init_weights(self, weight_init='xavier'): +
if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ +
elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ +
elif weight_init == 'normal': + init_func = nn.init.normal_ +
else: + raise NotImplementedError + +
for m in self.modules(): +
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): +
if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) +
else: + init_func(m.weight) +
if m.bias is not None: + nn.init.constant_(m.bias, 0) +
# bbox_embed is an nn.ModuleList of MLP heads, so initialize the final layer of each head +
for bbox_embed in self.bbox_embed: + nn.init.normal_(bbox_embed.layers[-1].weight, mean=0, std=0.001) + +
def get_corner_points_of_roi(self, rois): +
rois = rois.view(-1, rois.shape[-1]) +
batch_size_rcnn = rois.shape[0] + +
local_roi_grid_points = self.get_corner_points(rois, batch_size_rcnn) +
local_roi_grid_points = common_utils.rotate_points_along_z( +
local_roi_grid_points.clone(), rois[:, 6] + ).squeeze(dim=1) +
global_center = rois[:, 0:3].clone() + + +
global_roi_grid_points = local_roi_grid_points + global_center.unsqueeze(dim=1) +
return global_roi_grid_points, local_roi_grid_points + +
@staticmethod +
def get_dense_grid_points(rois, batch_size_rcnn, grid_size): +
if isinstance(grid_size, list): +
faked_features = rois.new_ones((grid_size[0], grid_size[1], grid_size[2])) +
grid_size = torch.tensor(grid_size).float().cuda() +
else: +
faked_features = rois.new_ones((grid_size, grid_size, grid_size)) +
dense_idx = faked_features.nonzero() +
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() + +
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] +
roi_grid_points = torch.div((dense_idx + 0.5), grid_size) * local_roi_size.unsqueeze(dim=1) - (local_roi_size.unsqueeze(dim=1) / 2) +
return roi_grid_points + +
@staticmethod +
def get_corner_points(rois, batch_size_rcnn): +
faked_features = rois.new_ones((2, 2, 2)) + +
dense_idx = faked_features.nonzero() +
dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() + +
local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] +
roi_grid_points = dense_idx * local_roi_size.unsqueeze(dim=1) \ + - (local_roi_size.unsqueeze(dim=1) / 2) +
return roi_grid_points + +
def get_proxy_points_of_roi(self, rois, grid_size): +
rois = rois.view(-1, rois.shape[-1]) +
batch_size_rcnn = rois.shape[0] + +
local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) +
local_roi_grid_points =
common_utils.rotate_points_along_z(local_roi_grid_points.clone(), rois[:, 6]).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + global_roi_grid_points = local_roi_grid_points + global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points + + def roi_grid_pool(self, batch_size, rois, point_coords, point_features,batch_dict=None,batch_cnt=None): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + point_coords: (num_points, 4) [bs_idx, x, y, z] + point_features: (num_points, C) + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + Returns: + """ + + num_frames = batch_dict['num_frames'] + num_rois = rois.shape[2]*rois.shape[1] + + global_roi_proxy_points, local_roi_proxy_points = self.get_proxy_points_of_roi( + rois.permute(0,2,1,3).contiguous(), grid_size=self.grid_size + ) + + num_points = point_coords.shape[1] + num_proxy_points = self.num_proxy_points + + xyz = point_coords[:, :, 0:3].view(-1,3) + if batch_cnt is None: + xyz_batch_cnt = torch.tensor([num_points]*rois.shape[2]*batch_size).cuda().int() + else: + xyz_batch_cnt = torch.tensor(batch_cnt).cuda().int() + new_xyz = torch.cat([i[0] for i in global_roi_proxy_points.chunk(rois.shape[2],0)],0) + new_xyz_batch_cnt = torch.tensor([self.num_proxy_points]*rois.shape[2]*batch_size).cuda().int() + + _, pooled_features = self.roi_grid_pool_layer( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz, + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features.view(-1,point_features.shape[-1]).contiguous(), + ) + + features = pooled_features.view( + point_features.shape[0], self.num_proxy_points, + pooled_features.shape[-1] + ).contiguous() + + return features,global_roi_proxy_points.view(batch_size*rois.shape[2], num_frames*num_proxy_points,3).contiguous() + + def spherical_coordinate(self, src, diag_dist): + + assert (src.shape[-1] == 27) + device = src.device + indices_x = torch.LongTensor([0,3,6,9,12,15,18,21,24]).to(device) # + indices_y = torch.LongTensor([1,4,7,10,13,16,19,22,25]).to(device) # + indices_z = torch.LongTensor([2,5,8,11,14,17,20,23,26]).to(device) + src_x = torch.index_select(src, -1, indices_x) + src_y = torch.index_select(src, -1, indices_y) + src_z = torch.index_select(src, -1, indices_z) + dis = (src_x ** 2 + src_y ** 2 + src_z ** 2) ** 0.5 + phi = torch.atan(src_y / (src_x + 1e-5)) + the = torch.acos(src_z / (dis + 1e-5)) + dis = dis / (diag_dist + 1e-5) + src = torch.cat([dis, phi, the], dim = -1) + return src + + def crop_current_frame_points(self, src, batch_size,trajectory_rois,num_rois,num_sample, batch_dict): + + for bs_idx in range(batch_size): + + cur_batch_boxes = trajectory_rois[bs_idx,0,:,:7].view(-1,7) + cur_radiis = torch.sqrt((cur_batch_boxes[:,3]/2) ** 2 + (cur_batch_boxes[:,4]/2) ** 2) * 1.1 + cur_points = batch_dict['points'][(batch_dict['points'][:, 0] == bs_idx)][:,1:] + time_mask = cur_points[:,-1].abs() < 1e-3 + cur_points = cur_points[time_mask] + dis = torch.norm((cur_points[:,:2].unsqueeze(0) - cur_batch_boxes[:,:2].unsqueeze(1).repeat(1,cur_points.shape[0],1)), dim = 2) + point_mask = (dis <= cur_radiis.unsqueeze(-1)) + + mask = point_mask + sampled_idx = torch.topk(mask.float(),128)[1] + sampled_idx_buffer = sampled_idx[:, 0:1].repeat(1, 128) + roi_idx = torch.arange(num_rois)[:, None].repeat(1, 128) + sampled_mask = mask[roi_idx, sampled_idx] + sampled_idx_buffer[sampled_mask] = sampled_idx[sampled_mask] + + src[bs_idx] = cur_points[sampled_idx_buffer][:,:,:5] + empty_flag = 
sampled_mask.sum(-1)==0 + src[bs_idx,empty_flag] = 0 + + return src + + def trajectories_auxiliary_branch(self,trajectory_rois): + + time_stamp = torch.ones([trajectory_rois.shape[0],trajectory_rois.shape[1],trajectory_rois.shape[2],1]).cuda() + for i in range(time_stamp.shape[1]): + time_stamp[:,i,:] = i*0.1 + + box_seq = torch.cat([trajectory_rois[:,:,:,:7],time_stamp],-1) + box_seq[:, :, :,0:3] = box_seq[:, :, :,0:3] - box_seq[:, 0:1, :, 0:3] + + + roi_ry = box_seq[:,:,:,6] % (2 * np.pi) + roi_ry_t0 = roi_ry[:,0] + roi_ry_t0 = roi_ry_t0.repeat(1,box_seq.shape[1]) + + # transfer LiDAR coords to local coords + box_seq = common_utils.rotate_points_along_z( + points=box_seq.view(-1, 1, box_seq.shape[-1]), angle=-roi_ry_t0.view(-1) + ).view(box_seq.shape[0],box_seq.shape[1], -1, box_seq.shape[-1]) + + box_seq[:,:,:,6] = 0 + + batch_rcnn = box_seq.shape[0]*box_seq.shape[2] + + box_reg, box_feat, _ = self.seqboxembed(box_seq.permute(0,2,3,1).contiguous().view(batch_rcnn,box_seq.shape[-1],box_seq.shape[1])) + + return box_reg, box_feat + + def get_proposal_aware_motion_feature(self,proxy_point,batch_size,trajectory_rois,num_rois,batch_dict): + + time_stamp = torch.ones([proxy_point.shape[0],proxy_point.shape[1],1]).cuda() + padding_zero = torch.zeros([proxy_point.shape[0],proxy_point.shape[1],2]).cuda() + proxy_point_padding = torch.cat([padding_zero,time_stamp],-1) + + num_time_coding = trajectory_rois.shape[1] + + for i in range(num_time_coding): + proxy_point_padding[:,i*self.num_proxy_points:(i+1)*self.num_proxy_points,-1] = i*0.1 + + + ######### use T0 Norm ######## + corner_points, _ = self.get_corner_points_of_roi(trajectory_rois[:,0,:,:].contiguous()) + corner_points = corner_points.view(batch_size, num_rois, -1, corner_points.shape[-1]) + corner_points = corner_points.view(batch_size * num_rois, -1) + corner_add_center_points = torch.cat([corner_points, trajectory_rois[:,0,:,:].reshape(batch_size * num_rois, -1)[:,:3]], dim = -1) + + pos_fea = proxy_point[:,:,:3].repeat(1,1,9) - corner_add_center_points.unsqueeze(1) + + lwh = trajectory_rois[:,0,:,:].reshape(batch_size * num_rois, -1)[:,3:6].unsqueeze(1).repeat(1,proxy_point.shape[1],1) + diag_dist = (lwh[:,:,0]**2 + lwh[:,:,1]**2 + lwh[:,:,2]**2) ** 0.5 + pos_fea = self.spherical_coordinate(pos_fea, diag_dist = diag_dist.unsqueeze(-1)) + ######### use T0 Norm ######## + + proxy_point_padding = torch.cat([pos_fea,proxy_point_padding],-1) + proxy_point_motion_feat = self.up_dimension_motion(proxy_point_padding) + + return proxy_point_motion_feat + + def get_proposal_aware_geometry_feature(self,src, batch_size,trajectory_rois,num_rois,batch_dict): + + i = 0 # only current frame + corner_points, _ = self.get_corner_points_of_roi(trajectory_rois[:,i,:,:].contiguous()) + + corner_points = corner_points.view(batch_size, num_rois, -1, corner_points.shape[-1]) + corner_points = corner_points.view(batch_size * num_rois, -1) + trajectory_roi_center = trajectory_rois[:,i,:,:].contiguous().reshape(batch_size * num_rois, -1)[:,:3] + corner_add_center_points = torch.cat([corner_points, trajectory_roi_center], dim = -1) + proposal_aware_feat = src[:,i*self.num_lidar_points:(i+1)*self.num_lidar_points,:3].repeat(1,1,9) - \ + corner_add_center_points.unsqueeze(1).repeat(1,self.num_lidar_points,1) + + lwh = trajectory_rois[:,i,:,:].reshape(batch_size * num_rois, -1)[:,3:6].unsqueeze(1).repeat(1,proposal_aware_feat.shape[1],1) + diag_dist = (lwh[:,:,0]**2 + lwh[:,:,1]**2 + lwh[:,:,2]**2) ** 0.5 + proposal_aware_feat = 
self.spherical_coordinate(proposal_aware_feat, diag_dist = diag_dist.unsqueeze(-1)) + + proposal_aware_feat = torch.cat([proposal_aware_feat, src[:,:,3:]], dim = -1) + src_gemoetry = self.up_dimension_geometry(proposal_aware_feat) + proxy_point_geometry, proxy_points = self.roi_grid_pool(batch_size,trajectory_rois,src,src_gemoetry,batch_dict,batch_cnt=None) + return proxy_point_geometry,proxy_points + + @staticmethod + def reorder_rois_for_refining(pred_bboxes): + + num_max_rois = max([len(bbox) for bbox in pred_bboxes]) + num_max_rois = max(1, num_max_rois) # at least one faked rois to avoid error + ordered_bboxes = torch.zeros([len(pred_bboxes),num_max_rois,pred_bboxes[0].shape[-1]]).cuda() + + for bs_idx in range(ordered_bboxes.shape[0]): + ordered_bboxes[bs_idx,:len(pred_bboxes[bs_idx])] = pred_bboxes[bs_idx] + return ordered_bboxes + + def transform_prebox_to_current_vel(self,pred_boxes3d,pose_pre,pose_cur): + + expand_bboxes = np.concatenate([pred_boxes3d[:,:3], np.ones((pred_boxes3d.shape[0], 1))], axis=-1) + expand_vels = np.concatenate([pred_boxes3d[:,7:9], np.zeros((pred_boxes3d.shape[0], 1))], axis=-1) + bboxes_global = np.dot(expand_bboxes, pose_pre.T)[:, :3] + vels_global = np.dot(expand_vels, pose_pre[:3,:3].T) + moved_bboxes_global = copy.deepcopy(bboxes_global) + moved_bboxes_global[:,:2] = moved_bboxes_global[:,:2] - 0.1*vels_global[:,:2] + + expand_bboxes_global = np.concatenate([bboxes_global[:,:3],np.ones((bboxes_global.shape[0], 1))], axis=-1) + expand_moved_bboxes_global = np.concatenate([moved_bboxes_global[:,:3],np.ones((bboxes_global.shape[0], 1))], axis=-1) + bboxes_pre2cur = np.dot(expand_bboxes_global, np.linalg.inv(pose_cur.T))[:, :3] + + moved_bboxes_pre2cur = np.dot(expand_moved_bboxes_global, np.linalg.inv(pose_cur.T))[:, :3] + vels_pre2cur = np.dot(vels_global, np.linalg.inv(pose_cur[:3,:3].T))[:,:2] + bboxes_pre2cur = np.concatenate([bboxes_pre2cur, pred_boxes3d[:,3:7],vels_pre2cur],axis=-1) + bboxes_pre2cur[:,6] = bboxes_pre2cur[..., 6] + np.arctan2(pose_pre[1, 0], pose_pre[0,0]) + bboxes_pre2cur[:,6] = bboxes_pre2cur[..., 6] - np.arctan2(pose_cur[1, 0], pose_cur[0,0]) + bboxes_pre2cur[:,7:9] = moved_bboxes_pre2cur[:,:2] - bboxes_pre2cur[:,:2] + return bboxes_pre2cur[None,:,:] + + def generate_trajectory(self,cur_batch_boxes,proposals_list,batch_dict): + + trajectory_rois = cur_batch_boxes[:,None,:,:].repeat(1,batch_dict['rois'].shape[-2],1,1) + trajectory_rois[:,0,:,:]= cur_batch_boxes + valid_length = torch.zeros([batch_dict['batch_size'],batch_dict['rois'].shape[-2],trajectory_rois.shape[2]]) + valid_length[:,0] = 1 + num_frames = batch_dict['rois'].shape[-2] + matching_table = (trajectory_rois.new_ones([trajectory_rois.shape[1],trajectory_rois.shape[2]]) * -1).long() + + for i in range(1,num_frames): + frame = torch.zeros_like(cur_batch_boxes) + frame[:,:,0:2] = trajectory_rois[:,i-1,:,0:2] + trajectory_rois[:,i-1,:,7:9] + frame[:,:,2:] = trajectory_rois[:,i-1,:,2:] + + for bs_idx in range( batch_dict['batch_size']): + iou3d = iou3d_nms_utils.boxes_iou3d_gpu(frame[bs_idx,:,:7], proposals_list[bs_idx,i,:,:7]) + max_overlaps, traj_assignment = torch.max(iou3d, dim=1) + + fg_inds = ((max_overlaps >= 0.5)).nonzero().view(-1) + + valid_length[bs_idx,i,fg_inds] = 1 + matching_table[i,fg_inds] = traj_assignment[fg_inds] + + trajectory_rois[bs_idx,i,fg_inds,:] = proposals_list[bs_idx,i,traj_assignment[fg_inds]] + + batch_dict['valid_length'] = valid_length + + return trajectory_rois,valid_length, matching_table + + def forward(self, batch_dict): + """ + 
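+        Online (memory-bank) forward pass: when batch_dict carries a
+        'memory_bank', boxes and geometry features cached from previous
+        frames are transformed into the current frame via the ego poses
+        and re-matched to the current proposals; otherwise the multi-frame
+        'proposals_list' is consumed directly.
+
+        Illustrative input layout (shapes follow the indexing used in this
+        method; a batch size of 1 is assumed):
+            batch_dict['proposals_list']   # (1, num_frames, num_rois, 9)
+            batch_dict['poses']            # (1, 4 * num_frames, 4) ego poses
+            batch_dict['memory_bank']      # optional dict with 'rois',
+                                           # 'roi_scores', 'roi_labels',
+                                           # 'feature_bank'
+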
:param input_data: input dict + :return: + """ + + if 'memory_bank' in batch_dict.keys(): + + rois_list = [] + memory_list = copy.deepcopy(batch_dict['memory_bank']) + + for idx in range(len(memory_list['rois'])): + + rois = torch.cat([batch_dict['memory_bank']['rois'][idx][0], + batch_dict['memory_bank']['roi_scores'][idx][0], + batch_dict['memory_bank']['roi_labels'][idx][0]],-1) + + rois_list.append(rois) + + batch_rois = self.reorder_rois_for_refining(rois_list) + batch_dict['roi_scores'] = batch_rois[None,:,:,9] + batch_dict['roi_labels'] = batch_rois[None,:,:,10] + + proposals_list = [] + + for i in range(self.model_cfg.Transformer.num_frames): + pose_pre = batch_dict['poses'][0,i*4:(i+1)*4,:] + pred2cur = self.transform_prebox_to_current_vel(batch_rois[i,:,:9].cpu().numpy(),pose_pre=pose_pre.cpu().numpy(), + pose_cur=batch_dict['poses'][0,:4,:].cpu().numpy()) + proposals_list.append(torch.from_numpy(pred2cur).cuda().float()) + batch_rois = torch.cat(proposals_list,0) + batch_dict['proposals_list'] = batch_rois[None,:,:,:9] + + batch_dict['rois'] = batch_rois.unsqueeze(0).permute(0,2,1,3) + num_rois = batch_dict['rois'].shape[1] + batch_dict['num_frames'] = batch_dict['rois'].shape[2] + roi_labels_list = copy.deepcopy(batch_dict['roi_labels']) + + batch_dict['roi_scores'] = batch_dict['roi_scores'].permute(0,2,1) + batch_dict['roi_labels'] = batch_dict['roi_labels'][:,0,:].long() + proposals_list = batch_dict['proposals_list'] + batch_size = batch_dict['batch_size'] + cur_batch_boxes = copy.deepcopy(batch_dict['rois'].detach())[:,:,0] + batch_dict['cur_frame_idx'] = 0 + + else: + + batch_dict['rois'] = batch_dict['proposals_list'].permute(0,2,1,3) + assert batch_dict['rois'].shape[0] ==1 + num_rois = batch_dict['rois'].shape[1] + batch_dict['num_frames'] = batch_dict['rois'].shape[2] + roi_labels_list = copy.deepcopy(batch_dict['roi_labels']) + + batch_dict['roi_scores'] = batch_dict['roi_scores'].permute(0,2,1) + batch_dict['roi_labels'] = batch_dict['roi_labels'][:,0,:].long() + proposals_list = batch_dict['proposals_list'] + batch_size = batch_dict['batch_size'] + cur_batch_boxes = copy.deepcopy(batch_dict['rois'].detach())[:,:,0] + batch_dict['cur_frame_idx'] = 0 + + trajectory_rois,effective_length,matching_table = self.generate_trajectory(cur_batch_boxes,proposals_list,batch_dict) + + + batch_dict['has_class_labels'] = True + batch_dict['trajectory_rois'] = trajectory_rois + + + rois = batch_dict['rois'] + num_rois = batch_dict['rois'].shape[1] + + if self.model_cfg.get('USE_TRAJ_EMPTY_MASK',None): + empty_mask = batch_dict['rois'][:,:,0,:6].sum(-1)==0 + batch_dict['valid_traj_mask'] = ~empty_mask + + num_sample = self.num_lidar_points + + src = rois.new_zeros(batch_size, num_rois, num_sample, 5) + + src = self.crop_current_frame_points(src, batch_size, trajectory_rois, num_rois, num_sample, batch_dict) + + src = src.view(batch_size * num_rois, -1, src.shape[-1]) + + src_geometry_feature,proxy_points = self.get_proposal_aware_geometry_feature(src,batch_size,trajectory_rois,num_rois,batch_dict) + + src_motion_feature = self.get_proposal_aware_motion_feature(proxy_points,batch_size,trajectory_rois,num_rois,batch_dict) + + + if batch_dict['sample_idx'][0] >=1: + + src_repeat = src_geometry_feature[:,None,:self.num_proxy_points,:].repeat([1,trajectory_rois.shape[1],1,1]) + src_before = src_repeat[:,1:,:,:].clone() #[bs,traj,num_roi,C] + valid_length = batch_dict['num_frames'] -1 if batch_dict['sample_idx'][0] > batch_dict['num_frames'] -1 \ + else 
int(batch_dict['sample_idx'][0].item()) + num_max_rois = max(trajectory_rois.shape[2], *[i.shape[0] for i in batch_dict['memory_bank']['feature_bank']]) + feature_bank = self.reorder_memory(batch_dict['memory_bank']['feature_bank'][:valid_length],num_max_rois) + effective_length = effective_length[0,1:1+valid_length].bool() #rm dim of bs + for i in range(valid_length): + src_before[:,i][effective_length[i]] = feature_bank[i,matching_table[1+i][effective_length[i]]] + + src_geometry_feature = torch.cat([src_repeat[:,:1],src_before],1).view(src_geometry_feature.shape[0],-1, + src_geometry_feature.shape[-1]) + + else: + + src_geometry_feature = src_geometry_feature.repeat([1,trajectory_rois.shape[1],1]) + + batch_dict['geometory_feature_memory'] = src_geometry_feature[:,:self.num_proxy_points] + + + src = src_geometry_feature + src_motion_feature + + + if self.model_cfg.get('USE_TRAJ_EMPTY_MASK',None): + src[empty_mask.view(-1)] = 0 + + if self.model_cfg.Transformer.use_grid_pos.init_type == 'index': + pos = self.grid_pos_embeded(self.grid_index.cuda())[None,:,:] + pos = torch.cat([torch.zeros(1,1,self.hidden_dim).cuda(),pos],1) + else: + pos=None + + hs, tokens = self.transformer(src,pos=pos) + point_cls_list = [] + + for i in range(self.num_enc_layer): + point_cls_list.append(self.class_embed[0](tokens[i][0])) + + point_cls = torch.cat(point_cls_list,0) + + hs = hs.permute(1,0,2).reshape(hs.shape[1],-1) + + _, feat_box = self.trajectories_auxiliary_branch(trajectory_rois) + + joint_reg = self.jointembed(torch.cat([hs,feat_box],-1)) + + rcnn_cls = point_cls + rcnn_reg = joint_reg + + if not self.training: + batch_dict['rois'] = batch_dict['rois'][:,:,0].contiguous() + rcnn_cls = rcnn_cls[-rcnn_cls.shape[0]//self.num_enc_layer:] + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg + ) + + batch_dict['batch_box_preds'] = batch_box_preds + + batch_dict['cls_preds_normalized'] = False + if self.avg_stage1_score: + stage1_score = batch_dict['roi_scores'][:,:,:1] + batch_cls_preds = F.sigmoid(batch_cls_preds) + if self.model_cfg.get('IOU_WEIGHT', None): + batch_box_preds_list = [] + roi_labels_list = [] + batch_cls_preds_list = [] + for bs_idx in range(batch_size): + car_mask = batch_dict['roi_labels'][bs_idx] ==1 + batch_cls_preds_car = batch_cls_preds[bs_idx].pow(self.model_cfg.IOU_WEIGHT[0])* \ + stage1_score[bs_idx].pow(1-self.model_cfg.IOU_WEIGHT[0]) + batch_cls_preds_car = batch_cls_preds_car[car_mask][None] + batch_cls_preds_pedcyc = batch_cls_preds[bs_idx].pow(self.model_cfg.IOU_WEIGHT[1])* \ + stage1_score[bs_idx].pow(1-self.model_cfg.IOU_WEIGHT[1]) + batch_cls_preds_pedcyc = batch_cls_preds_pedcyc[~car_mask][None] + cls_preds = torch.cat([batch_cls_preds_car,batch_cls_preds_pedcyc],1) + box_preds = torch.cat([batch_dict['batch_box_preds'][bs_idx][car_mask], + batch_dict['batch_box_preds'][bs_idx][~car_mask]],0)[None] + roi_labels = torch.cat([batch_dict['roi_labels'][bs_idx][car_mask], + batch_dict['roi_labels'][bs_idx][~car_mask]],0)[None] + batch_box_preds_list.append(box_preds) + roi_labels_list.append(roi_labels) + batch_cls_preds_list.append(cls_preds) + batch_dict['batch_box_preds'] = torch.cat(batch_box_preds_list,0) + batch_dict['roi_labels'] = torch.cat(roi_labels_list,0) + batch_cls_preds = torch.cat(batch_cls_preds_list,0) + + else: + batch_cls_preds = torch.sqrt(batch_cls_preds*stage1_score) + batch_dict['cls_preds_normalized'] = True + + 
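+            # When AVG_STAGE1_SCORE is set, batch_cls_preds has just been fused with
+            # the stage-1 proposal scores: blended per class as
+            # pred**w * stage1**(1 - w) under IOU_WEIGHT, or as the geometric mean
+            # sqrt(pred * stage1) otherwise (e.g. pred=0.9, stage1=0.49 gives
+            # sqrt(0.441) ~= 0.66); cls_preds_normalized then tells post-processing
+            # to skip the extra sigmoid.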
batch_dict['batch_cls_preds'] = batch_cls_preds + + return batch_dict + + def reorder_memory(self, memory,num_max_rois): + + ordered_memory = memory[0].new_zeros([len(memory),num_max_rois,memory[0].shape[1],memory[0].shape[2]]) + for bs_idx in range(len(memory)): + ordered_memory[bs_idx,:len(memory[bs_idx])] = memory[bs_idx] + return ordered_memory + + def generate_predicted_boxes(self, batch_size, rois, cls_preds=None, box_preds=None): + """ + Args: + batch_size: + rois: (B, N, 7) + cls_preds: (BN, num_class) + box_preds: (BN, code_size) + Returns: + """ + code_size = self.box_coder.code_size + + if cls_preds is not None: + batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1]) + else: + batch_cls_preds = None + batch_box_preds = box_preds.view(batch_size, -1, code_size) + + roi_ry = rois[:, :, 6].view(-1) + roi_xyz = rois[:, :, 0:3].view(-1, 3) + local_rois = rois.clone().detach() + local_rois[:, :, 0:3] = 0 + + batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size) + + batch_box_preds = common_utils.rotate_points_along_z( + batch_box_preds.unsqueeze(dim=1), roi_ry + ).squeeze(dim=1) + + batch_box_preds[:, 0:3] += roi_xyz + batch_box_preds = batch_box_preds.view(batch_size, -1, code_size) + batch_box_preds = torch.cat([batch_box_preds,rois[:,:,7:]],-1) + return batch_cls_preds, batch_box_preds \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/partA2_head.py b/toolbox/openpcdet/pcdet/models/roi_heads/partA2_head.py new file mode 100644 index 000000000..8d7c07ce8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/partA2_head.py @@ -0,0 +1,224 @@ +import numpy as np +import torch +import torch.nn as nn + +from ...ops.roiaware_pool3d import roiaware_pool3d_utils +from ...utils.spconv_utils import spconv +from .roi_head_template import RoIHeadTemplate + + +class PartA2FCHead(RoIHeadTemplate): + def __init__(self, input_channels, model_cfg, num_class=1, **kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + + self.SA_modules = nn.ModuleList() + block = self.post_act_block + + c0 = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES // 2 + self.conv_part = spconv.SparseSequential( + block(4, 64, 3, padding=1, indice_key='rcnn_subm1'), + block(64, c0, 3, padding=1, indice_key='rcnn_subm1_1'), + ) + self.conv_rpn = spconv.SparseSequential( + block(input_channels, 64, 3, padding=1, indice_key='rcnn_subm2'), + block(64, c0, 3, padding=1, indice_key='rcnn_subm1_2'), + ) + + shared_fc_list = [] + pool_size = self.model_cfg.ROI_AWARE_POOL.POOL_SIZE + pre_channel = self.model_cfg.ROI_AWARE_POOL.NUM_FEATURES * pool_size * pool_size * pool_size + for k in range(0, self.model_cfg.SHARED_FC.__len__()): + shared_fc_list.extend([ + nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False), + nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]), + nn.ReLU() + ]) + pre_channel = self.model_cfg.SHARED_FC[k] + + if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0: + shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO)) + + self.shared_fc_layer = nn.Sequential(*shared_fc_list) + + self.cls_layers = self.make_fc_layers( + input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC + ) + self.reg_layers = self.make_fc_layers( + input_channels=pre_channel, + output_channels=self.box_coder.code_size * self.num_class, + fc_list=self.model_cfg.REG_FC + ) + + self.roiaware_pool3d_layer = 
roiaware_pool3d_utils.RoIAwarePool3d( + out_size=self.model_cfg.ROI_AWARE_POOL.POOL_SIZE, + max_pts_each_voxel=self.model_cfg.ROI_AWARE_POOL.MAX_POINTS_PER_VOXEL + ) + self.init_weights(weight_init='xavier') + + def init_weights(self, weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001) + + def post_act_block(self, in_channels, out_channels, kernel_size, indice_key, stride=1, padding=0, conv_type='subm'): + if conv_type == 'subm': + m = spconv.SparseSequential( + spconv.SubMConv3d(in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key), + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + nn.ReLU(), + ) + elif conv_type == 'spconv': + m = spconv.SparseSequential( + spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + bias=False, indice_key=indice_key), + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + nn.ReLU(), + ) + elif conv_type == 'inverseconv': + m = spconv.SparseSequential( + spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, + indice_key=indice_key, bias=False), + nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.01), + nn.ReLU(), + ) + else: + raise NotImplementedError + return m + + def roiaware_pool(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + point_coords: (num_points, 4) [bs_idx, x, y, z] + point_features: (num_points, C) + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + Returns: + + """ + batch_size = batch_dict['batch_size'] + batch_idx = batch_dict['point_coords'][:, 0] + point_coords = batch_dict['point_coords'][:, 1:4] + point_features = batch_dict['point_features'] + part_features = torch.cat(( + batch_dict['point_part_offset'] if not self.model_cfg.get('DISABLE_PART', False) else point_coords, + batch_dict['point_cls_scores'].view(-1, 1).detach() + ), dim=1) + part_features[part_features[:, -1] < self.model_cfg.SEG_MASK_SCORE_THRESH, 0:3] = 0 + + rois = batch_dict['rois'] + + pooled_part_features_list, pooled_rpn_features_list = [], [] + + for bs_idx in range(batch_size): + bs_mask = (batch_idx == bs_idx) + cur_point_coords = point_coords[bs_mask] + cur_part_features = part_features[bs_mask] + cur_rpn_features = point_features[bs_mask] + cur_roi = rois[bs_idx][:, 0:7].contiguous() # (N, 7) + + pooled_part_features = self.roiaware_pool3d_layer.forward( + cur_roi, cur_point_coords, cur_part_features, pool_method='avg' + ) # (N, out_x, out_y, out_z, 4) + pooled_rpn_features = self.roiaware_pool3d_layer.forward( + cur_roi, cur_point_coords, cur_rpn_features, pool_method='max' + ) # (N, out_x, out_y, out_z, C) + + pooled_part_features_list.append(pooled_part_features) + pooled_rpn_features_list.append(pooled_rpn_features) + + pooled_part_features = torch.cat(pooled_part_features_list, dim=0) # (B * N, out_x, out_y, out_z, 4) + pooled_rpn_features = torch.cat(pooled_rpn_features_list, dim=0) # (B * N, out_x, out_y, out_z, C) + + return pooled_part_features, pooled_rpn_features + + @staticmethod 
+ def fake_sparse_idx(sparse_idx, batch_size_rcnn): + print('Warning: Sparse_Idx_Shape(%s) \r' % (str(sparse_idx.shape)), end='', flush=True) + # at most one sample is non-empty, then fake the first voxels of each sample(BN needs at least + # two values each channel) as non-empty for the below calculation + sparse_idx = sparse_idx.new_zeros((batch_size_rcnn, 3)) + bs_idxs = torch.arange(batch_size_rcnn).type_as(sparse_idx).view(-1, 1) + sparse_idx = torch.cat((bs_idxs, sparse_idx), dim=1) + return sparse_idx + + def forward(self, batch_dict): + """ + Args: + batch_dict: + + Returns: + + """ + targets_dict = self.proposal_layer( + batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST'] + ) + if self.training: + targets_dict = self.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + + # RoI aware pooling + pooled_part_features, pooled_rpn_features = self.roiaware_pool(batch_dict) + batch_size_rcnn = pooled_part_features.shape[0] # (B * N, out_x, out_y, out_z, 4) + + # transform to sparse tensors + sparse_shape = np.array(pooled_part_features.shape[1:4], dtype=np.int32) + sparse_idx = pooled_part_features.sum(dim=-1).nonzero() # (non_empty_num, 4) ==> [bs_idx, x_idx, y_idx, z_idx] + if sparse_idx.shape[0] < 3: + sparse_idx = self.fake_sparse_idx(sparse_idx, batch_size_rcnn) + if self.training: + # these are invalid samples + targets_dict['rcnn_cls_labels'].fill_(-1) + targets_dict['reg_valid_mask'].fill_(-1) + + part_features = pooled_part_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]] + rpn_features = pooled_rpn_features[sparse_idx[:, 0], sparse_idx[:, 1], sparse_idx[:, 2], sparse_idx[:, 3]] + coords = sparse_idx.int().contiguous() + part_features = spconv.SparseConvTensor(part_features, coords, sparse_shape, batch_size_rcnn) + rpn_features = spconv.SparseConvTensor(rpn_features, coords, sparse_shape, batch_size_rcnn) + + # forward rcnn network + x_part = self.conv_part(part_features) + x_rpn = self.conv_rpn(rpn_features) + + merged_feature = torch.cat((x_rpn.features, x_part.features), dim=1) # (N, C) + shared_feature = spconv.SparseConvTensor(merged_feature, coords, sparse_shape, batch_size_rcnn) + shared_feature = shared_feature.dense().view(batch_size_rcnn, -1, 1) + + shared_feature = self.shared_fc_layer(shared_feature) + + rcnn_cls = self.cls_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2) + rcnn_reg = self.reg_layers(shared_feature).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C) + + if not self.training: + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg + ) + batch_dict['batch_cls_preds'] = batch_cls_preds + batch_dict['batch_box_preds'] = batch_box_preds + batch_dict['cls_preds_normalized'] = False + else: + targets_dict['rcnn_cls'] = rcnn_cls + targets_dict['rcnn_reg'] = rcnn_reg + + self.forward_ret_dict = targets_dict + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/pointrcnn_head.py b/toolbox/openpcdet/pcdet/models/roi_heads/pointrcnn_head.py new file mode 100644 index 000000000..933f38475 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/pointrcnn_head.py @@ -0,0 +1,179 @@ +import torch +import torch.nn as nn + +from ...ops.pointnet2.pointnet2_batch import pointnet2_modules +from ...ops.roipoint_pool3d import roipoint_pool3d_utils +from ...utils 
import common_utils +from .roi_head_template import RoIHeadTemplate + + +class PointRCNNHead(RoIHeadTemplate): + def __init__(self, input_channels, model_cfg, num_class=1, **kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + use_bn = self.model_cfg.USE_BN + self.SA_modules = nn.ModuleList() + channel_in = input_channels + + self.num_prefix_channels = 3 + 2 # xyz + point_scores + point_depth + xyz_mlps = [self.num_prefix_channels] + self.model_cfg.XYZ_UP_LAYER + shared_mlps = [] + for k in range(len(xyz_mlps) - 1): + shared_mlps.append(nn.Conv2d(xyz_mlps[k], xyz_mlps[k + 1], kernel_size=1, bias=not use_bn)) + if use_bn: + shared_mlps.append(nn.BatchNorm2d(xyz_mlps[k + 1])) + shared_mlps.append(nn.ReLU()) + self.xyz_up_layer = nn.Sequential(*shared_mlps) + + c_out = self.model_cfg.XYZ_UP_LAYER[-1] + self.merge_down_layer = nn.Sequential( + nn.Conv2d(c_out * 2, c_out, kernel_size=1, bias=not use_bn), + *[nn.BatchNorm2d(c_out), nn.ReLU()] if use_bn else [nn.ReLU()] + ) + + for k in range(self.model_cfg.SA_CONFIG.NPOINTS.__len__()): + mlps = [channel_in] + self.model_cfg.SA_CONFIG.MLPS[k] + + npoint = self.model_cfg.SA_CONFIG.NPOINTS[k] if self.model_cfg.SA_CONFIG.NPOINTS[k] != -1 else None + self.SA_modules.append( + pointnet2_modules.PointnetSAModule( + npoint=npoint, + radius=self.model_cfg.SA_CONFIG.RADIUS[k], + nsample=self.model_cfg.SA_CONFIG.NSAMPLE[k], + mlp=mlps, + use_xyz=True, + bn=use_bn + ) + ) + channel_in = mlps[-1] + + self.cls_layers = self.make_fc_layers( + input_channels=channel_in, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC + ) + self.reg_layers = self.make_fc_layers( + input_channels=channel_in, + output_channels=self.box_coder.code_size * self.num_class, + fc_list=self.model_cfg.REG_FC + ) + + self.roipoint_pool3d_layer = roipoint_pool3d_utils.RoIPointPool3d( + num_sampled_points=self.model_cfg.ROI_POINT_POOL.NUM_SAMPLED_POINTS, + pool_extra_width=self.model_cfg.ROI_POINT_POOL.POOL_EXTRA_WIDTH + ) + self.init_weights(weight_init='xavier') + + def init_weights(self, weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001) + + def roipool3d_gpu(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + point_coords: (num_points, 4) [bs_idx, x, y, z] + point_features: (num_points, C) + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + Returns: + + """ + batch_size = batch_dict['batch_size'] + batch_idx = batch_dict['point_coords'][:, 0] + point_coords = batch_dict['point_coords'][:, 1:4] + point_features = batch_dict['point_features'] + rois = batch_dict['rois'] # (B, num_rois, 7 + C) + batch_cnt = point_coords.new_zeros(batch_size).int() + for bs_idx in range(batch_size): + batch_cnt[bs_idx] = (batch_idx == bs_idx).sum() + + assert batch_cnt.min() == batch_cnt.max() + + point_scores = batch_dict['point_cls_scores'].detach() + point_depths = point_coords.norm(dim=1) / self.model_cfg.ROI_POINT_POOL.DEPTH_NORMALIZER - 0.5 + 
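+        # Each pooled point is prefixed with its foreground score and a normalized
+        # depth feature: depth = ||xyz|| / DEPTH_NORMALIZER - 0.5, so with e.g. a
+        # normalizer of 70.0 a point 35 m from the sensor maps to 0.0 and a point
+        # at the origin maps to -0.5.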
point_features_list = [point_scores[:, None], point_depths[:, None], point_features] + point_features_all = torch.cat(point_features_list, dim=1) + batch_points = point_coords.view(batch_size, -1, 3) + batch_point_features = point_features_all.view(batch_size, -1, point_features_all.shape[-1]) + + with torch.no_grad(): + pooled_features, pooled_empty_flag = self.roipoint_pool3d_layer( + batch_points, batch_point_features, rois + ) # pooled_features: (B, num_rois, num_sampled_points, 3 + C), pooled_empty_flag: (B, num_rois) + + # canonical transformation + roi_center = rois[:, :, 0:3] + pooled_features[:, :, :, 0:3] -= roi_center.unsqueeze(dim=2) + + pooled_features = pooled_features.view(-1, pooled_features.shape[-2], pooled_features.shape[-1]) + pooled_features[:, :, 0:3] = common_utils.rotate_points_along_z( + pooled_features[:, :, 0:3], -rois.view(-1, rois.shape[-1])[:, 6] + ) + pooled_features[pooled_empty_flag.view(-1) > 0] = 0 + return pooled_features + + def forward(self, batch_dict): + """ + Args: + batch_dict: + + Returns: + + """ + targets_dict = self.proposal_layer( + batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST'] + ) + if self.training: + targets_dict = self.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + + pooled_features = self.roipool3d_gpu(batch_dict) # (total_rois, num_sampled_points, 3 + C) + + xyz_input = pooled_features[..., 0:self.num_prefix_channels].transpose(1, 2).unsqueeze(dim=3).contiguous() + xyz_features = self.xyz_up_layer(xyz_input) + point_features = pooled_features[..., self.num_prefix_channels:].transpose(1, 2).unsqueeze(dim=3) + merged_features = torch.cat((xyz_features, point_features), dim=1) + merged_features = self.merge_down_layer(merged_features) + + l_xyz, l_features = [pooled_features[..., 0:3].contiguous()], [merged_features.squeeze(dim=3).contiguous()] + + for i in range(len(self.SA_modules)): + li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i]) + l_xyz.append(li_xyz) + l_features.append(li_features) + + shared_features = l_features[-1] # (total_rois, num_features, 1) + rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2) + rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C) + + if not self.training: + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg + ) + batch_dict['batch_cls_preds'] = batch_cls_preds + batch_dict['batch_box_preds'] = batch_box_preds + batch_dict['cls_preds_normalized'] = False + else: + targets_dict['rcnn_cls'] = rcnn_cls + targets_dict['rcnn_reg'] = rcnn_reg + + self.forward_ret_dict = targets_dict + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/pvrcnn_head.py b/toolbox/openpcdet/pcdet/models/roi_heads/pvrcnn_head.py new file mode 100644 index 000000000..6ec6b9806 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/pvrcnn_head.py @@ -0,0 +1,175 @@ +import torch.nn as nn + +from ...ops.pointnet2.pointnet2_stack import pointnet2_modules as pointnet2_stack_modules +from ...utils import common_utils +from .roi_head_template import RoIHeadTemplate + + +class PVRCNNHead(RoIHeadTemplate): + def __init__(self, input_channels, model_cfg, num_class=1, **kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + + 
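+        # build_local_aggregation_module returns the stacked-SA pooling layer
+        # together with its output channel count; num_c_out then sizes the first
+        # shared FC layer, whose input is GRID_SIZE**3 * num_c_out once the RoI
+        # grid features are flattened.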
self.roi_grid_pool_layer, num_c_out = pointnet2_stack_modules.build_local_aggregation_module( + input_channels=input_channels, config=self.model_cfg.ROI_GRID_POOL + ) + + GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE + pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * num_c_out + + shared_fc_list = [] + for k in range(0, self.model_cfg.SHARED_FC.__len__()): + shared_fc_list.extend([ + nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False), + nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]), + nn.ReLU() + ]) + pre_channel = self.model_cfg.SHARED_FC[k] + + if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0: + shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO)) + + self.shared_fc_layer = nn.Sequential(*shared_fc_list) + + self.cls_layers = self.make_fc_layers( + input_channels=pre_channel, output_channels=self.num_class, fc_list=self.model_cfg.CLS_FC + ) + self.reg_layers = self.make_fc_layers( + input_channels=pre_channel, + output_channels=self.box_coder.code_size * self.num_class, + fc_list=self.model_cfg.REG_FC + ) + self.init_weights(weight_init='xavier') + + def init_weights(self, weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001) + + def roi_grid_pool(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + point_coords: (num_points, 4) [bs_idx, x, y, z] + point_features: (num_points, C) + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + Returns: + + """ + batch_size = batch_dict['batch_size'] + rois = batch_dict['rois'] + point_coords = batch_dict['point_coords'] + point_features = batch_dict['point_features'] + + point_features = point_features * batch_dict['point_cls_scores'].view(-1, 1) + + global_roi_grid_points, local_roi_grid_points = self.get_global_grid_points_of_roi( + rois, grid_size=self.model_cfg.ROI_GRID_POOL.GRID_SIZE + ) # (BxN, 6x6x6, 3) + global_roi_grid_points = global_roi_grid_points.view(batch_size, -1, 3) # (B, Nx6x6x6, 3) + + xyz = point_coords[:, 1:4] + xyz_batch_cnt = xyz.new_zeros(batch_size).int() + batch_idx = point_coords[:, 0] + for k in range(batch_size): + xyz_batch_cnt[k] = (batch_idx == k).sum() + + new_xyz = global_roi_grid_points.view(-1, 3) + new_xyz_batch_cnt = xyz.new_zeros(batch_size).int().fill_(global_roi_grid_points.shape[1]) + pooled_points, pooled_features = self.roi_grid_pool_layer( + xyz=xyz.contiguous(), + xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz, + new_xyz_batch_cnt=new_xyz_batch_cnt, + features=point_features.contiguous(), + ) # (M1 + M2 ..., C) + + pooled_features = pooled_features.view( + -1, self.model_cfg.ROI_GRID_POOL.GRID_SIZE ** 3, + pooled_features.shape[-1] + ) # (BxN, 6x6x6, C) + return pooled_features + + def get_global_grid_points_of_roi(self, rois, grid_size): + rois = rois.view(-1, rois.shape[-1]) + batch_size_rcnn = rois.shape[0] + + local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3) + global_roi_grid_points = 
common_utils.rotate_points_along_z( + local_roi_grid_points.clone(), rois[:, 6] + ).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + global_roi_grid_points += global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points + + @staticmethod + def get_dense_grid_points(rois, batch_size_rcnn, grid_size): + faked_features = rois.new_ones((grid_size, grid_size, grid_size)) + dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx] + dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3) + + local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] + roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \ + - (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3) + return roi_grid_points + + def forward(self, batch_dict): + """ + :param input_data: input dict + :return: + """ + + targets_dict = self.proposal_layer( + batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST'] + ) + if self.training: + targets_dict = batch_dict.get('roi_targets_dict', None) + if targets_dict is None: + targets_dict = self.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + + # RoI aware pooling + pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C) + + grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE + batch_size_rcnn = pooled_features.shape[0] + pooled_features = pooled_features.permute(0, 2, 1).\ + contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6) + + shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1)) + rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2) + rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C) + + if not self.training: + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg + ) + batch_dict['batch_cls_preds'] = batch_cls_preds + batch_dict['batch_box_preds'] = batch_box_preds + batch_dict['cls_preds_normalized'] = False + else: + targets_dict['rcnn_cls'] = rcnn_cls + targets_dict['rcnn_reg'] = rcnn_reg + + self.forward_ret_dict = targets_dict + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/roi_head_template.py b/toolbox/openpcdet/pcdet/models/roi_heads/roi_head_template.py new file mode 100644 index 000000000..5151614c6 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/roi_head_template.py @@ -0,0 +1,261 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ...utils import box_coder_utils, common_utils, loss_utils +from ..model_utils.model_nms_utils import class_agnostic_nms +from .target_assigner.proposal_target_layer import ProposalTargetLayer + + +class RoIHeadTemplate(nn.Module): + def __init__(self, num_class, model_cfg, **kwargs): + super().__init__() + self.model_cfg = model_cfg + self.num_class = num_class + self.box_coder = getattr(box_coder_utils, self.model_cfg.TARGET_CONFIG.BOX_CODER)( + **self.model_cfg.TARGET_CONFIG.get('BOX_CODER_CONFIG', {}) + ) + self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG) + self.build_losses(self.model_cfg.LOSS_CONFIG) + self.forward_ret_dict = None + + def build_losses(self, losses_cfg): + self.add_module( + 'reg_loss_func', + 
loss_utils.WeightedSmoothL1Loss(code_weights=losses_cfg.LOSS_WEIGHTS['code_weights']) + ) + + def make_fc_layers(self, input_channels, output_channels, fc_list): + fc_layers = [] + pre_channel = input_channels + for k in range(0, fc_list.__len__()): + fc_layers.extend([ + nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False), + nn.BatchNorm1d(fc_list[k]), + nn.ReLU() + ]) + pre_channel = fc_list[k] + if self.model_cfg.DP_RATIO >= 0 and k == 0: + fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO)) + fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True)) + fc_layers = nn.Sequential(*fc_layers) + return fc_layers + + @torch.no_grad() + def proposal_layer(self, batch_dict, nms_config): + """ + Args: + batch_dict: + batch_size: + batch_cls_preds: (B, num_boxes, num_classes | 1) or (N1+N2+..., num_classes | 1) + batch_box_preds: (B, num_boxes, 7+C) or (N1+N2+..., 7+C) + cls_preds_normalized: indicate whether batch_cls_preds is normalized + batch_index: optional (N1+N2+...) + nms_config: + + Returns: + batch_dict: + rois: (B, num_rois, 7+C) + roi_scores: (B, num_rois) + roi_labels: (B, num_rois) + + """ + if batch_dict.get('rois', None) is not None: + return batch_dict + + batch_size = batch_dict['batch_size'] + batch_box_preds = batch_dict['batch_box_preds'] + batch_cls_preds = batch_dict['batch_cls_preds'] + rois = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE, batch_box_preds.shape[-1])) + roi_scores = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE)) + roi_labels = batch_box_preds.new_zeros((batch_size, nms_config.NMS_POST_MAXSIZE), dtype=torch.long) + + for index in range(batch_size): + if batch_dict.get('batch_index', None) is not None: + assert batch_cls_preds.shape.__len__() == 2 + batch_mask = (batch_dict['batch_index'] == index) + else: + assert batch_dict['batch_cls_preds'].shape.__len__() == 3 + batch_mask = index + box_preds = batch_box_preds[batch_mask] + cls_preds = batch_cls_preds[batch_mask] + + cur_roi_scores, cur_roi_labels = torch.max(cls_preds, dim=1) + + if nms_config.MULTI_CLASSES_NMS: + raise NotImplementedError + else: + selected, selected_scores = class_agnostic_nms( + box_scores=cur_roi_scores, box_preds=box_preds, nms_config=nms_config + ) + + rois[index, :len(selected), :] = box_preds[selected] + roi_scores[index, :len(selected)] = cur_roi_scores[selected] + roi_labels[index, :len(selected)] = cur_roi_labels[selected] + + batch_dict['rois'] = rois + batch_dict['roi_scores'] = roi_scores + batch_dict['roi_labels'] = roi_labels + 1 + batch_dict['has_class_labels'] = True if batch_cls_preds.shape[-1] > 1 else False + batch_dict.pop('batch_index', None) + return batch_dict + + def assign_targets(self, batch_dict): + batch_size = batch_dict['batch_size'] + with torch.no_grad(): + targets_dict = self.proposal_target_layer.forward(batch_dict) + + rois = targets_dict['rois'] # (B, N, 7 + C) + gt_of_rois = targets_dict['gt_of_rois'] # (B, N, 7 + C + 1) + targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach() + + # canonical transformation + roi_center = rois[:, :, 0:3] + roi_ry = rois[:, :, 6] % (2 * np.pi) + gt_of_rois[:, :, 0:3] = gt_of_rois[:, :, 0:3] - roi_center + gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry + + # transfer LiDAR coords to local coords + gt_of_rois = common_utils.rotate_points_along_z( + points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1) + ).view(batch_size, -1, gt_of_rois.shape[-1]) + + # flip orientation if rois have opposite orientation + 
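+ # i.e. express the GT heading relative to the ROI: wrap it into [0, 2pi), rotate targets that point against the ROI by pi, and keep the final residual within [-pi/2, pi/2]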
heading_label = gt_of_rois[:, :, 6] % (2 * np.pi) # 0 ~ 2pi + opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5) + heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi) + flag = heading_label > np.pi + heading_label[flag] = heading_label[flag] - np.pi * 2 # (-pi/2, pi/2) + heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2) + + gt_of_rois[:, :, 6] = heading_label + targets_dict['gt_of_rois'] = gt_of_rois + return targets_dict + + def get_box_reg_layer_loss(self, forward_ret_dict): + loss_cfgs = self.model_cfg.LOSS_CONFIG + code_size = self.box_coder.code_size + reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1) + gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size] + gt_of_rois_src = forward_ret_dict['gt_of_rois_src'][..., 0:code_size].view(-1, code_size) + rcnn_reg = forward_ret_dict['rcnn_reg'] # (rcnn_batch_size, C) + roi_boxes3d = forward_ret_dict['rois'] + rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0] + + fg_mask = (reg_valid_mask > 0) + fg_sum = fg_mask.long().sum().item() + + tb_dict = {} + + if loss_cfgs.REG_LOSS == 'smooth-l1': + rois_anchor = roi_boxes3d.clone().detach().view(-1, code_size) + rois_anchor[:, 0:3] = 0 + rois_anchor[:, 6] = 0 + reg_targets = self.box_coder.encode_torch( + gt_boxes3d_ct.view(rcnn_batch_size, code_size), rois_anchor + ) + + rcnn_loss_reg = self.reg_loss_func( + rcnn_reg.view(rcnn_batch_size, -1).unsqueeze(dim=0), + reg_targets.unsqueeze(dim=0), + ) # [B, M, 7] + rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1) + rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight'] + tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.item() + + if loss_cfgs.CORNER_LOSS_REGULARIZATION and fg_sum > 0: + # TODO: NEED to BE CHECK + fg_rcnn_reg = rcnn_reg.view(rcnn_batch_size, -1)[fg_mask] + fg_roi_boxes3d = roi_boxes3d.view(-1, code_size)[fg_mask] + + fg_roi_boxes3d = fg_roi_boxes3d.view(1, -1, code_size) + batch_anchors = fg_roi_boxes3d.clone().detach() + roi_ry = fg_roi_boxes3d[:, :, 6].view(-1) + roi_xyz = fg_roi_boxes3d[:, :, 0:3].view(-1, 3) + batch_anchors[:, :, 0:3] = 0 + rcnn_boxes3d = self.box_coder.decode_torch( + fg_rcnn_reg.view(batch_anchors.shape[0], -1, code_size), batch_anchors + ).view(-1, code_size) + + rcnn_boxes3d = common_utils.rotate_points_along_z( + rcnn_boxes3d.unsqueeze(dim=1), roi_ry + ).squeeze(dim=1) + rcnn_boxes3d[:, 0:3] += roi_xyz + + loss_corner = loss_utils.get_corner_loss_lidar( + rcnn_boxes3d[:, 0:7], + gt_of_rois_src[fg_mask][:, 0:7] + ) + loss_corner = loss_corner.mean() + loss_corner = loss_corner * loss_cfgs.LOSS_WEIGHTS['rcnn_corner_weight'] + + rcnn_loss_reg += loss_corner + tb_dict['rcnn_loss_corner'] = loss_corner.item() + else: + raise NotImplementedError + + return rcnn_loss_reg, tb_dict + + def get_box_cls_layer_loss(self, forward_ret_dict): + loss_cfgs = self.model_cfg.LOSS_CONFIG + rcnn_cls = forward_ret_dict['rcnn_cls'] + rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1) + if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy': + rcnn_cls_flat = rcnn_cls.view(-1) + batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none') + cls_valid_mask = (rcnn_cls_labels >= 0).float() + rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) + elif loss_cfgs.CLS_LOSS == 'CrossEntropy': + batch_loss_cls = 
F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1) + cls_valid_mask = (rcnn_cls_labels >= 0).float() + rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0) + else: + raise NotImplementedError + + rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight'] + tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.item()} + return rcnn_loss_cls, tb_dict + + def get_loss(self, tb_dict=None): + tb_dict = {} if tb_dict is None else tb_dict + rcnn_loss = 0 + rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict) + rcnn_loss += rcnn_loss_cls + tb_dict.update(cls_tb_dict) + + rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict) + rcnn_loss += rcnn_loss_reg + tb_dict.update(reg_tb_dict) + tb_dict['rcnn_loss'] = rcnn_loss.item() + return rcnn_loss, tb_dict + + def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds): + """ + Args: + batch_size: + rois: (B, N, 7) + cls_preds: (BN, num_class) + box_preds: (BN, code_size) + + Returns: + + """ + code_size = self.box_coder.code_size + # batch_cls_preds: (B, N, num_class or 1) + batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1]) + batch_box_preds = box_preds.view(batch_size, -1, code_size) + + roi_ry = rois[:, :, 6].view(-1) + roi_xyz = rois[:, :, 0:3].view(-1, 3) + local_rois = rois.clone().detach() + local_rois[:, :, 0:3] = 0 + + batch_box_preds = self.box_coder.decode_torch(batch_box_preds, local_rois).view(-1, code_size) + + batch_box_preds = common_utils.rotate_points_along_z( + batch_box_preds.unsqueeze(dim=1), roi_ry + ).squeeze(dim=1) + batch_box_preds[:, 0:3] += roi_xyz + batch_box_preds = batch_box_preds.view(batch_size, -1, code_size) + return batch_cls_preds, batch_box_preds diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/second_head.py b/toolbox/openpcdet/pcdet/models/roi_heads/second_head.py new file mode 100644 index 000000000..0f96c620b --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/second_head.py @@ -0,0 +1,188 @@ +from functools import partial + +import torch +import torch.nn as nn +import torch.nn.functional as F +from .roi_head_template import RoIHeadTemplate +from ...utils import common_utils, loss_utils + + +class SECONDHead(RoIHeadTemplate): + def __init__(self, input_channels, model_cfg, num_class=1, **kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + + GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE + pre_channel = self.model_cfg.ROI_GRID_POOL.IN_CHANNEL * GRID_SIZE * GRID_SIZE + + shared_fc_list = [] + for k in range(0, self.model_cfg.SHARED_FC.__len__()): + shared_fc_list.extend([ + nn.Conv1d(pre_channel, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False), + nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]), + nn.ReLU() + ]) + pre_channel = self.model_cfg.SHARED_FC[k] + + if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0: + shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO)) + + self.shared_fc_layer = nn.Sequential(*shared_fc_list) + + self.iou_layers = self.make_fc_layers( + input_channels=pre_channel, output_channels=1, fc_list=self.model_cfg.IOU_FC + ) + self.init_weights(weight_init='xavier') + + if torch.__version__ >= '1.3': + self.affine_grid = partial(F.affine_grid, align_corners=True) + self.grid_sample = partial(F.grid_sample, align_corners=True) + else: + self.affine_grid = F.affine_grid + self.grid_sample = F.grid_sample + + def init_weights(self, 
weight_init='xavier'): + if weight_init == 'kaiming': + init_func = nn.init.kaiming_normal_ + elif weight_init == 'xavier': + init_func = nn.init.xavier_normal_ + elif weight_init == 'normal': + init_func = nn.init.normal_ + else: + raise NotImplementedError + + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + if weight_init == 'normal': + init_func(m.weight, mean=0, std=0.001) + else: + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def roi_grid_pool(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + spatial_features_2d: (B, C, H, W) + Returns: + + """ + batch_size = batch_dict['batch_size'] + rois = batch_dict['rois'].detach() + spatial_features_2d = batch_dict['spatial_features_2d'].detach() + height, width = spatial_features_2d.size(2), spatial_features_2d.size(3) + + dataset_cfg = batch_dict['dataset_cfg'] + min_x = dataset_cfg.POINT_CLOUD_RANGE[0] + min_y = dataset_cfg.POINT_CLOUD_RANGE[1] + voxel_size_x = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[0] + voxel_size_y = dataset_cfg.DATA_PROCESSOR[-1].VOXEL_SIZE[1] + down_sample_ratio = self.model_cfg.ROI_GRID_POOL.DOWNSAMPLE_RATIO + + pooled_features_list = [] + torch.backends.cudnn.enabled = False + for b_id in range(batch_size): + # Map global boxes coordinates to feature map coordinates + x1 = (rois[b_id, :, 0] - rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio) + x2 = (rois[b_id, :, 0] + rois[b_id, :, 3] / 2 - min_x) / (voxel_size_x * down_sample_ratio) + y1 = (rois[b_id, :, 1] - rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio) + y2 = (rois[b_id, :, 1] + rois[b_id, :, 4] / 2 - min_y) / (voxel_size_y * down_sample_ratio) + + angle, _ = common_utils.check_numpy_to_torch(rois[b_id, :, 6]) + + cosa = torch.cos(angle) + sina = torch.sin(angle) + + theta = torch.stack(( + (x2 - x1) / (width - 1) * cosa, (x2 - x1) / (width - 1) * (-sina), (x1 + x2 - width + 1) / (width - 1), + (y2 - y1) / (height - 1) * sina, (y2 - y1) / (height - 1) * cosa, (y1 + y2 - height + 1) / (height - 1) + ), dim=1).view(-1, 2, 3).float() + + grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE + grid = self.affine_grid( + theta, + torch.Size((rois.size(1), spatial_features_2d.size(1), grid_size, grid_size)) + ) + + pooled_features = self.grid_sample( + spatial_features_2d[b_id].unsqueeze(0).expand(rois.size(1), spatial_features_2d.size(1), height, width), + grid + ) + + pooled_features_list.append(pooled_features) + + torch.backends.cudnn.enabled = True + pooled_features = torch.cat(pooled_features_list, dim=0) + + return pooled_features + + def forward(self, batch_dict): + """ + :param input_data: input dict + :return: + """ + targets_dict = self.proposal_layer( + batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST'] + ) + if self.training: + targets_dict = self.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + + # RoI aware pooling + pooled_features = self.roi_grid_pool(batch_dict) # (BxN, C, 7, 7) + batch_size_rcnn = pooled_features.shape[0] + + shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1)) + rcnn_iou = self.iou_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B*N, 1) + + if not self.training: + batch_dict['batch_cls_preds'] = rcnn_iou.view(batch_dict['batch_size'], -1, rcnn_iou.shape[-1]) + batch_dict['batch_box_preds'] = batch_dict['rois'] + 
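+ # the IoU head only rescores proposals at inference: boxes are the input rois passed through unchanged, and the predicted IoU serves as the confidence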
batch_dict['cls_preds_normalized'] = False + else: + targets_dict['rcnn_iou'] = rcnn_iou + + self.forward_ret_dict = targets_dict + + return batch_dict + + def get_loss(self, tb_dict=None): + tb_dict = {} if tb_dict is None else tb_dict + rcnn_loss = 0 + rcnn_loss_cls, cls_tb_dict = self.get_box_iou_layer_loss(self.forward_ret_dict) + rcnn_loss += rcnn_loss_cls + tb_dict.update(cls_tb_dict) + + tb_dict['rcnn_loss'] = rcnn_loss.item() + return rcnn_loss, tb_dict + + def get_box_iou_layer_loss(self, forward_ret_dict): + loss_cfgs = self.model_cfg.LOSS_CONFIG + rcnn_iou = forward_ret_dict['rcnn_iou'] + rcnn_iou_labels = forward_ret_dict['rcnn_cls_labels'].view(-1) + rcnn_iou_flat = rcnn_iou.view(-1) + if loss_cfgs.IOU_LOSS == 'BinaryCrossEntropy': + batch_loss_iou = nn.functional.binary_cross_entropy_with_logits( + rcnn_iou_flat, + rcnn_iou_labels.float(), reduction='none' + ) + elif loss_cfgs.IOU_LOSS == 'L2': + batch_loss_iou = nn.functional.mse_loss(rcnn_iou_flat, rcnn_iou_labels, reduction='none') + elif loss_cfgs.IOU_LOSS == 'smoothL1': + diff = rcnn_iou_flat - rcnn_iou_labels + batch_loss_iou = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(diff, 1.0 / 9.0) + elif loss_cfgs.IOU_LOSS == 'focalbce': + batch_loss_iou = loss_utils.sigmoid_focal_cls_loss(rcnn_iou_flat, rcnn_iou_labels) + else: + raise NotImplementedError + + iou_valid_mask = (rcnn_iou_labels >= 0).float() + rcnn_loss_iou = (batch_loss_iou * iou_valid_mask).sum() / torch.clamp(iou_valid_mask.sum(), min=1.0) + + rcnn_loss_iou = rcnn_loss_iou * loss_cfgs.LOSS_WEIGHTS['rcnn_iou_weight'] + tb_dict = {'rcnn_loss_iou': rcnn_loss_iou.item()} + return rcnn_loss_iou, tb_dict diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/__init__.py b/toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py b/toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py new file mode 100644 index 000000000..49f5f0a04 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/target_assigner/proposal_target_layer.py @@ -0,0 +1,228 @@ +import numpy as np +import torch +import torch.nn as nn + +from ....ops.iou3d_nms import iou3d_nms_utils + + +class ProposalTargetLayer(nn.Module): + def __init__(self, roi_sampler_cfg): + super().__init__() + self.roi_sampler_cfg = roi_sampler_cfg + + def forward(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + roi_scores: (B, num_rois) + gt_boxes: (B, N, 7 + C + 1) + roi_labels: (B, num_rois) + Returns: + batch_dict: + rois: (B, M, 7 + C) + gt_of_rois: (B, M, 7 + C) + gt_iou_of_rois: (B, M) + roi_scores: (B, M) + roi_labels: (B, M) + reg_valid_mask: (B, M) + rcnn_cls_labels: (B, M) + """ + batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn( + batch_dict=batch_dict + ) + # regression valid mask + reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long() + + # classification label + if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls': + batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long() + ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \ + (batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH) + batch_cls_labels[ignore_mask > 0] = -1 + elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou': + iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH + iou_fg_thresh = 
self.roi_sampler_cfg.CLS_FG_THRESH + fg_mask = batch_roi_ious > iou_fg_thresh + bg_mask = batch_roi_ious < iou_bg_thresh + interval_mask = (fg_mask == 0) & (bg_mask == 0) + + batch_cls_labels = (fg_mask > 0).float() + batch_cls_labels[interval_mask] = \ + (batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh) + else: + raise NotImplementedError + + targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious, + 'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels, + 'reg_valid_mask': reg_valid_mask, + 'rcnn_cls_labels': batch_cls_labels} + + return targets_dict + + def sample_rois_for_rcnn(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + roi_scores: (B, num_rois) + gt_boxes: (B, N, 7 + C + 1) + roi_labels: (B, num_rois) + Returns: + + """ + batch_size = batch_dict['batch_size'] + rois = batch_dict['rois'] + roi_scores = batch_dict['roi_scores'] + roi_labels = batch_dict['roi_labels'] + gt_boxes = batch_dict['gt_boxes'] + + code_size = rois.shape[-1] + batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size) + batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1) + batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE) + batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE) + batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long) + + for index in range(batch_size): + cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \ + rois[index], gt_boxes[index], roi_labels[index], roi_scores[index] + k = cur_gt.__len__() - 1 + while k >= 0 and cur_gt[k].sum() == 0: + k -= 1 + cur_gt = cur_gt[:k + 1] + cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt + + if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False): + max_overlaps, gt_assignment = self.get_max_iou_with_same_class( + rois=cur_roi, roi_labels=cur_roi_labels, + gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long() + ) + else: + iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7]) # (M, N) + max_overlaps, gt_assignment = torch.max(iou3d, dim=1) + + sampled_inds = self.subsample_rois(max_overlaps=max_overlaps) + + batch_rois[index] = cur_roi[sampled_inds] + batch_roi_labels[index] = cur_roi_labels[sampled_inds] + batch_roi_ious[index] = max_overlaps[sampled_inds] + batch_roi_scores[index] = cur_roi_scores[sampled_inds] + batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]] + + return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels + + def subsample_rois(self, max_overlaps): + # sample fg, easy_bg, hard_bg + fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)) + fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH) + + fg_inds = ((max_overlaps >= fg_thresh)).nonzero().view(-1) + easy_bg_inds = ((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1) + hard_bg_inds = ((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) & + (max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).nonzero().view(-1) + + fg_num_rois = fg_inds.numel() + bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel() + + if fg_num_rois > 0 and bg_num_rois > 0: + # sampling fg + fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois) + + rand_num = 
torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long() + fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]] + + # sampling bg + bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image + bg_inds = self.sample_bg_inds( + hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO + ) + + elif fg_num_rois > 0 and bg_num_rois == 0: + # sampling fg + rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois) + rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long() + fg_inds = fg_inds[rand_num] + bg_inds = fg_inds[fg_inds < 0] # yield empty tensor + + elif bg_num_rois > 0 and fg_num_rois == 0: + # sampling bg + bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE + bg_inds = self.sample_bg_inds( + hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO + ) + else: + print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item())) + print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois)) + raise NotImplementedError + + sampled_inds = torch.cat((fg_inds, bg_inds), dim=0) + return sampled_inds + + @staticmethod + def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio): + if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0: + hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds)) + easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num + + # sampling hard bg + rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long() + hard_bg_inds = hard_bg_inds[rand_idx] + + # sampling easy bg + rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long() + easy_bg_inds = easy_bg_inds[rand_idx] + + bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0) + elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0: + hard_bg_rois_num = bg_rois_per_this_image + # sampling hard bg + rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long() + bg_inds = hard_bg_inds[rand_idx] + elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0: + easy_bg_rois_num = bg_rois_per_this_image + # sampling easy bg + rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long() + bg_inds = easy_bg_inds[rand_idx] + else: + raise NotImplementedError + + return bg_inds + + @staticmethod + def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels): + """ + Args: + rois: (N, 7) + roi_labels: (N) + gt_boxes: (N, 8) + gt_labels: (N) + + Returns: + max_overlaps: (N) max IoU of each roi against GT boxes of the same class + gt_assignment: (N) index of the matched GT box + """ + max_overlaps = rois.new_zeros(rois.shape[0]) + gt_assignment = roi_labels.new_zeros(roi_labels.shape[0]) + + for k in range(gt_labels.min().item(), gt_labels.max().item() + 1): + roi_mask = (roi_labels == k) + gt_mask = (gt_labels == k) + if roi_mask.sum() > 0 and gt_mask.sum() > 0: + cur_roi = rois[roi_mask] + cur_gt = gt_boxes[gt_mask] + original_gt_assignment = gt_mask.nonzero().view(-1) + + iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi[:, :7], cur_gt[:, :7]) # (M, N) + cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1) + max_overlaps[roi_mask] = cur_max_overlaps + gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment] + + return max_overlaps, gt_assignment diff --git a/toolbox/openpcdet/pcdet/models/roi_heads/voxelrcnn_head.py 
b/toolbox/openpcdet/pcdet/models/roi_heads/voxelrcnn_head.py new file mode 100644 index 000000000..df861d22b --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/roi_heads/voxelrcnn_head.py @@ -0,0 +1,262 @@ +import torch +import torch.nn as nn +from ...ops.pointnet2.pointnet2_stack import voxel_pool_modules as voxelpool_stack_modules +from ...utils import common_utils +from .roi_head_template import RoIHeadTemplate + + +class VoxelRCNNHead(RoIHeadTemplate): + def __init__(self, backbone_channels, model_cfg, point_cloud_range, voxel_size, num_class=1, **kwargs): + super().__init__(num_class=num_class, model_cfg=model_cfg) + self.model_cfg = model_cfg + self.pool_cfg = model_cfg.ROI_GRID_POOL + LAYER_cfg = self.pool_cfg.POOL_LAYERS + self.point_cloud_range = point_cloud_range + self.voxel_size = voxel_size + + c_out = 0 + self.roi_grid_pool_layers = nn.ModuleList() + for src_name in self.pool_cfg.FEATURES_SOURCE: + mlps = LAYER_cfg[src_name].MLPS + for k in range(len(mlps)): + mlps[k] = [backbone_channels[src_name]] + mlps[k] + pool_layer = voxelpool_stack_modules.NeighborVoxelSAModuleMSG( + query_ranges=LAYER_cfg[src_name].QUERY_RANGES, + nsamples=LAYER_cfg[src_name].NSAMPLE, + radii=LAYER_cfg[src_name].POOL_RADIUS, + mlps=mlps, + pool_method=LAYER_cfg[src_name].POOL_METHOD, + ) + + self.roi_grid_pool_layers.append(pool_layer) + + c_out += sum([x[-1] for x in mlps]) + + + GRID_SIZE = self.model_cfg.ROI_GRID_POOL.GRID_SIZE + # c_out = sum([x[-1] for x in mlps]) + pre_channel = GRID_SIZE * GRID_SIZE * GRID_SIZE * c_out + + shared_fc_list = [] + for k in range(0, self.model_cfg.SHARED_FC.__len__()): + shared_fc_list.extend([ + nn.Linear(pre_channel, self.model_cfg.SHARED_FC[k], bias=False), + nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]), + nn.ReLU(inplace=True) + ]) + pre_channel = self.model_cfg.SHARED_FC[k] + + if k != self.model_cfg.SHARED_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0: + shared_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO)) + self.shared_fc_layer = nn.Sequential(*shared_fc_list) + + cls_fc_list = [] + for k in range(0, self.model_cfg.CLS_FC.__len__()): + cls_fc_list.extend([ + nn.Linear(pre_channel, self.model_cfg.CLS_FC[k], bias=False), + nn.BatchNorm1d(self.model_cfg.CLS_FC[k]), + nn.ReLU() + ]) + pre_channel = self.model_cfg.CLS_FC[k] + + if k != self.model_cfg.CLS_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0: + cls_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO)) + self.cls_fc_layers = nn.Sequential(*cls_fc_list) + self.cls_pred_layer = nn.Linear(pre_channel, self.num_class, bias=True) + + reg_fc_list = [] + for k in range(0, self.model_cfg.REG_FC.__len__()): + reg_fc_list.extend([ + nn.Linear(pre_channel, self.model_cfg.REG_FC[k], bias=False), + nn.BatchNorm1d(self.model_cfg.REG_FC[k]), + nn.ReLU() + ]) + pre_channel = self.model_cfg.REG_FC[k] + + if k != self.model_cfg.REG_FC.__len__() - 1 and self.model_cfg.DP_RATIO > 0: + reg_fc_list.append(nn.Dropout(self.model_cfg.DP_RATIO)) + self.reg_fc_layers = nn.Sequential(*reg_fc_list) + self.reg_pred_layer = nn.Linear(pre_channel, self.box_coder.code_size * self.num_class, bias=True) + + self.init_weights() + + def init_weights(self): + init_func = nn.init.xavier_normal_ + for module_list in [self.shared_fc_layer, self.cls_fc_layers, self.reg_fc_layers]: + for m in module_list.modules(): + if isinstance(m, nn.Linear): + init_func(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + nn.init.normal_(self.cls_pred_layer.weight, 0, 0.01) + nn.init.constant_(self.cls_pred_layer.bias, 0) + 
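+ # small-std normal init keeps the initial classification logits and box residuals of the prediction layers close to zero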
nn.init.normal_(self.reg_pred_layer.weight, mean=0, std=0.001) + nn.init.constant_(self.reg_pred_layer.bias, 0) + + # def _init_weights(self): + # init_func = nn.init.xavier_normal_ + # for m in self.modules(): + # if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear): + # init_func(m.weight) + # if m.bias is not None: + # nn.init.constant_(m.bias, 0) + # nn.init.normal_(self.reg_layers[-1].weight, mean=0, std=0.001) + + def roi_grid_pool(self, batch_dict): + """ + Args: + batch_dict: + batch_size: + rois: (B, num_rois, 7 + C) + point_coords: (num_points, 4) [bs_idx, x, y, z] + point_features: (num_points, C) + point_cls_scores: (N1 + N2 + N3 + ..., 1) + point_part_offset: (N1 + N2 + N3 + ..., 3) + Returns: + + """ + rois = batch_dict['rois'] + batch_size = batch_dict['batch_size'] + with_vf_transform = batch_dict.get('with_voxel_feature_transform', False) + + roi_grid_xyz, _ = self.get_global_grid_points_of_roi( + rois, grid_size=self.pool_cfg.GRID_SIZE + ) # (BxN, 6x6x6, 3) + # roi_grid_xyz: (B, Nx6x6x6, 3) + roi_grid_xyz = roi_grid_xyz.view(batch_size, -1, 3) + + # compute the voxel coordinates of grid points + roi_grid_coords_x = (roi_grid_xyz[:, :, 0:1] - self.point_cloud_range[0]) // self.voxel_size[0] + roi_grid_coords_y = (roi_grid_xyz[:, :, 1:2] - self.point_cloud_range[1]) // self.voxel_size[1] + roi_grid_coords_z = (roi_grid_xyz[:, :, 2:3] - self.point_cloud_range[2]) // self.voxel_size[2] + # roi_grid_coords: (B, Nx6x6x6, 3) + roi_grid_coords = torch.cat([roi_grid_coords_x, roi_grid_coords_y, roi_grid_coords_z], dim=-1) + + batch_idx = rois.new_zeros(batch_size, roi_grid_coords.shape[1], 1) + for bs_idx in range(batch_size): + batch_idx[bs_idx, :, 0] = bs_idx + # roi_grid_coords: (B, Nx6x6x6, 4) + # roi_grid_coords = torch.cat([batch_idx, roi_grid_coords], dim=-1) + # roi_grid_coords = roi_grid_coords.int() + roi_grid_batch_cnt = rois.new_zeros(batch_size).int().fill_(roi_grid_coords.shape[1]) + + pooled_features_list = [] + for k, src_name in enumerate(self.pool_cfg.FEATURES_SOURCE): + pool_layer = self.roi_grid_pool_layers[k] + cur_stride = batch_dict['multi_scale_3d_strides'][src_name] + cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name] + + if with_vf_transform: + cur_sp_tensors = batch_dict['multi_scale_3d_features_post'][src_name] + else: + cur_sp_tensors = batch_dict['multi_scale_3d_features'][src_name] + + # compute voxel center xyz and batch_cnt + cur_coords = cur_sp_tensors.indices + cur_voxel_xyz = common_utils.get_voxel_centers( + cur_coords[:, 1:4], + downsample_times=cur_stride, + voxel_size=self.voxel_size, + point_cloud_range=self.point_cloud_range + ) + cur_voxel_xyz_batch_cnt = cur_voxel_xyz.new_zeros(batch_size).int() + for bs_idx in range(batch_size): + cur_voxel_xyz_batch_cnt[bs_idx] = (cur_coords[:, 0] == bs_idx).sum() + # get voxel2point tensor + v2p_ind_tensor = common_utils.generate_voxel2pinds(cur_sp_tensors) + # compute the grid coordinates in this scale, in [batch_idx, x y z] order + cur_roi_grid_coords = roi_grid_coords // cur_stride + cur_roi_grid_coords = torch.cat([batch_idx, cur_roi_grid_coords], dim=-1) + cur_roi_grid_coords = cur_roi_grid_coords.int() + # voxel neighbor aggregation + pooled_features = pool_layer( + xyz=cur_voxel_xyz.contiguous(), + xyz_batch_cnt=cur_voxel_xyz_batch_cnt, + new_xyz=roi_grid_xyz.contiguous().view(-1, 3), + new_xyz_batch_cnt=roi_grid_batch_cnt, + new_coords=cur_roi_grid_coords.contiguous().view(-1, 4), + features=cur_sp_tensors.features.contiguous(), + 
voxel2point_indices=v2p_ind_tensor + ) + + pooled_features = pooled_features.view( + -1, self.pool_cfg.GRID_SIZE ** 3, + pooled_features.shape[-1] + ) # (BxN, 6x6x6, C) + pooled_features_list.append(pooled_features) + + ms_pooled_features = torch.cat(pooled_features_list, dim=-1) + + return ms_pooled_features + + + def get_global_grid_points_of_roi(self, rois, grid_size): + rois = rois.view(-1, rois.shape[-1]) + batch_size_rcnn = rois.shape[0] + + local_roi_grid_points = self.get_dense_grid_points(rois, batch_size_rcnn, grid_size) # (B, 6x6x6, 3) + global_roi_grid_points = common_utils.rotate_points_along_z( + local_roi_grid_points.clone(), rois[:, 6] + ).squeeze(dim=1) + global_center = rois[:, 0:3].clone() + global_roi_grid_points += global_center.unsqueeze(dim=1) + return global_roi_grid_points, local_roi_grid_points + + @staticmethod + def get_dense_grid_points(rois, batch_size_rcnn, grid_size): + faked_features = rois.new_ones((grid_size, grid_size, grid_size)) + dense_idx = faked_features.nonzero() # (N, 3) [x_idx, y_idx, z_idx] + dense_idx = dense_idx.repeat(batch_size_rcnn, 1, 1).float() # (B, 6x6x6, 3) + + local_roi_size = rois.view(batch_size_rcnn, -1)[:, 3:6] + roi_grid_points = (dense_idx + 0.5) / grid_size * local_roi_size.unsqueeze(dim=1) \ + - (local_roi_size.unsqueeze(dim=1) / 2) # (B, 6x6x6, 3) + return roi_grid_points + + def forward(self, batch_dict): + """ + :param input_data: input dict + :return: + """ + + targets_dict = self.proposal_layer( + batch_dict, nms_config=self.model_cfg.NMS_CONFIG['TRAIN' if self.training else 'TEST'] + ) + if self.training: + targets_dict = self.assign_targets(batch_dict) + batch_dict['rois'] = targets_dict['rois'] + batch_dict['roi_labels'] = targets_dict['roi_labels'] + + # RoI aware pooling + pooled_features = self.roi_grid_pool(batch_dict) # (BxN, 6x6x6, C) + + # Box Refinement + pooled_features = pooled_features.view(pooled_features.size(0), -1) + shared_features = self.shared_fc_layer(pooled_features) + rcnn_cls = self.cls_pred_layer(self.cls_fc_layers(shared_features)) + rcnn_reg = self.reg_pred_layer(self.reg_fc_layers(shared_features)) + + # grid_size = self.model_cfg.ROI_GRID_POOL.GRID_SIZE + # batch_size_rcnn = pooled_features.shape[0] + # pooled_features = pooled_features.permute(0, 2, 1).\ + # contiguous().view(batch_size_rcnn, -1, grid_size, grid_size, grid_size) # (BxN, C, 6, 6, 6) + + # shared_features = self.shared_fc_layer(pooled_features.view(batch_size_rcnn, -1, 1)) + # rcnn_cls = self.cls_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, 1 or 2) + # rcnn_reg = self.reg_layers(shared_features).transpose(1, 2).contiguous().squeeze(dim=1) # (B, C) + + if not self.training: + batch_cls_preds, batch_box_preds = self.generate_predicted_boxes( + batch_size=batch_dict['batch_size'], rois=batch_dict['rois'], cls_preds=rcnn_cls, box_preds=rcnn_reg + ) + batch_dict['batch_cls_preds'] = batch_cls_preds + batch_dict['batch_box_preds'] = batch_box_preds + batch_dict['cls_preds_normalized'] = False + else: + targets_dict['rcnn_cls'] = rcnn_cls + targets_dict['rcnn_reg'] = rcnn_reg + + self.forward_ret_dict = targets_dict + + return batch_dict diff --git a/toolbox/openpcdet/pcdet/models/view_transforms/__init__.py b/toolbox/openpcdet/pcdet/models/view_transforms/__init__.py new file mode 100644 index 000000000..e182aedf8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/view_transforms/__init__.py @@ -0,0 +1,4 @@ +from .depth_lss import DepthLSSTransform +__all__ = { + 'DepthLSSTransform': DepthLSSTransform, 
+} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/models/view_transforms/depth_lss.py b/toolbox/openpcdet/pcdet/models/view_transforms/depth_lss.py new file mode 100644 index 000000000..8fee40621 --- /dev/null +++ b/toolbox/openpcdet/pcdet/models/view_transforms/depth_lss.py @@ -0,0 +1,258 @@ +import torch +from torch import nn +from pcdet.ops.bev_pool import bev_pool + + +def gen_dx_bx(xbound, ybound, zbound): + dx = torch.Tensor([row[2] for row in [xbound, ybound, zbound]]) + bx = torch.Tensor([row[0] + row[2] / 2.0 for row in [xbound, ybound, zbound]]) + nx = torch.LongTensor( + [(row[1] - row[0]) / row[2] for row in [xbound, ybound, zbound]] + ) + return dx, bx, nx + + +class DepthLSSTransform(nn.Module): + """ + This module implements LSS, which lifts images into 3D and then splats them onto BEV features. + This code is adapted from https://github.com/mit-han-lab/bevfusion/ with minimal modifications. + """ + def __init__(self, model_cfg): + super().__init__() + self.model_cfg = model_cfg + in_channel = self.model_cfg.IN_CHANNEL + out_channel = self.model_cfg.OUT_CHANNEL + self.image_size = self.model_cfg.IMAGE_SIZE + self.feature_size = self.model_cfg.FEATURE_SIZE + xbound = self.model_cfg.XBOUND + ybound = self.model_cfg.YBOUND + zbound = self.model_cfg.ZBOUND + self.dbound = self.model_cfg.DBOUND + downsample = self.model_cfg.DOWNSAMPLE + + dx, bx, nx = gen_dx_bx(xbound, ybound, zbound) + self.dx = nn.Parameter(dx, requires_grad=False) + self.bx = nn.Parameter(bx, requires_grad=False) + self.nx = nn.Parameter(nx, requires_grad=False) + + self.C = out_channel + self.frustum = self.create_frustum() + self.D = self.frustum.shape[0] + + self.dtransform = nn.Sequential( + nn.Conv2d(1, 8, 1), + nn.BatchNorm2d(8), + nn.ReLU(True), + nn.Conv2d(8, 32, 5, stride=4, padding=2), + nn.BatchNorm2d(32), + nn.ReLU(True), + nn.Conv2d(32, 64, 5, stride=2, padding=2), + nn.BatchNorm2d(64), + nn.ReLU(True), + ) + self.depthnet = nn.Sequential( + nn.Conv2d(in_channel + 64, in_channel, 3, padding=1), + nn.BatchNorm2d(in_channel), + nn.ReLU(True), + nn.Conv2d(in_channel, in_channel, 3, padding=1), + nn.BatchNorm2d(in_channel), + nn.ReLU(True), + nn.Conv2d(in_channel, self.D + self.C, 1), + ) + if downsample > 1: + assert downsample == 2, downsample + self.downsample = nn.Sequential( + nn.Conv2d(out_channel, out_channel, 3, padding=1, bias=False), + nn.BatchNorm2d(out_channel), + nn.ReLU(True), + nn.Conv2d(out_channel, out_channel, 3, stride=downsample, padding=1, bias=False), + nn.BatchNorm2d(out_channel), + nn.ReLU(True), + nn.Conv2d(out_channel, out_channel, 3, padding=1, bias=False), + nn.BatchNorm2d(out_channel), + nn.ReLU(True), + ) + else: + self.downsample = nn.Identity() + + def create_frustum(self): + iH, iW = self.image_size + fH, fW = self.feature_size + + ds = torch.arange(*self.dbound, dtype=torch.float).view(-1, 1, 1).expand(-1, fH, fW) + D, _, _ = ds.shape + xs = torch.linspace(0, iW - 1, fW, dtype=torch.float).view(1, 1, fW).expand(D, fH, fW) + ys = torch.linspace(0, iH - 1, fH, dtype=torch.float).view(1, fH, 1).expand(D, fH, fW) + frustum = torch.stack((xs, ys, ds), -1) + + return nn.Parameter(frustum, requires_grad=False) + + def get_geometry(self, camera2lidar_rots, camera2lidar_trans, intrins, post_rots, post_trans, **kwargs): + + camera2lidar_rots = camera2lidar_rots.to(torch.float) + camera2lidar_trans = camera2lidar_trans.to(torch.float) + intrins = intrins.to(torch.float) + post_rots = post_rots.to(torch.float) + post_trans = post_trans.to(torch.float) + + B, N, _ = 
camera2lidar_trans.shape + + # undo post-transformation + # B x N x D x H x W x 3 + points = self.frustum - post_trans.view(B, N, 1, 1, 1, 3) + points = torch.inverse(post_rots).view(B, N, 1, 1, 1, 3, 3).matmul(points.unsqueeze(-1)) + + # cam_to_lidar + points = torch.cat((points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3], points[:, :, :, :, :, 2:3]), 5) + combine = camera2lidar_rots.matmul(torch.inverse(intrins)) + points = combine.view(B, N, 1, 1, 1, 3, 3).matmul(points).squeeze(-1) + points += camera2lidar_trans.view(B, N, 1, 1, 1, 3) + + if "extra_rots" in kwargs: + extra_rots = kwargs["extra_rots"] + points = extra_rots.view(B, 1, 1, 1, 1, 3, 3).repeat(1, N, 1, 1, 1, 1, 1) \ + .matmul(points.unsqueeze(-1)).squeeze(-1) + + if "extra_trans" in kwargs: + extra_trans = kwargs["extra_trans"] + points += extra_trans.view(B, 1, 1, 1, 1, 3).repeat(1, N, 1, 1, 1, 1) + + return points + + def bev_pool(self, geom_feats, x): + geom_feats = geom_feats.to(torch.float) + x = x.to(torch.float) + + B, N, D, H, W, C = x.shape + Nprime = B * N * D * H * W + + # flatten x + x = x.reshape(Nprime, C) + + # flatten indices + geom_feats = ((geom_feats - (self.bx - self.dx / 2.0)) / self.dx).long() + geom_feats = geom_feats.view(Nprime, 3) + batch_ix = torch.cat([torch.full([Nprime // B, 1], ix, device=x.device, dtype=torch.long) for ix in range(B)]) + geom_feats = torch.cat((geom_feats, batch_ix), 1) + + # filter out points that are outside box + kept = ( + (geom_feats[:, 0] >= 0) + & (geom_feats[:, 0] < self.nx[0]) + & (geom_feats[:, 1] >= 0) + & (geom_feats[:, 1] < self.nx[1]) + & (geom_feats[:, 2] >= 0) + & (geom_feats[:, 2] < self.nx[2]) + ) + x = x[kept] + geom_feats = geom_feats[kept] + x = bev_pool(x, geom_feats, B, self.nx[2], self.nx[0], self.nx[1]) + + # collapse Z + final = torch.cat(x.unbind(dim=2), 1) + + return final + + def get_cam_feats(self, x, d): + B, N, C, fH, fW = x.shape + + d = d.view(B * N, *d.shape[2:]) + x = x.view(B * N, C, fH, fW) + + d = self.dtransform(d) + x = torch.cat([d, x], dim=1) + x = self.depthnet(x) + + depth = x[:, : self.D].softmax(dim=1) + x = depth.unsqueeze(1) * x[:, self.D : (self.D + self.C)].unsqueeze(2) + + x = x.view(B, N, self.C, self.D, fH, fW) + x = x.permute(0, 1, 3, 4, 5, 2) + return x + + def forward(self, batch_dict): + """ + Args: + batch_dict: + image_fpn (list[tensor]): image features after image neck + + Returns: + batch_dict: + spatial_features_img (tensor): bev features from image modality + """ + x = batch_dict['image_fpn'] + x = x[0] + BN, C, H, W = x.size() + img = x.view(int(BN/6), 6, C, H, W) + + camera_intrinsics = batch_dict['camera_intrinsics'] + camera2lidar = batch_dict['camera2lidar'] + img_aug_matrix = batch_dict['img_aug_matrix'] + lidar_aug_matrix = batch_dict['lidar_aug_matrix'] + lidar2image = batch_dict['lidar2image'] + + intrins = camera_intrinsics[..., :3, :3] + post_rots = img_aug_matrix[..., :3, :3] + post_trans = img_aug_matrix[..., :3, 3] + camera2lidar_rots = camera2lidar[..., :3, :3] + camera2lidar_trans = camera2lidar[..., :3, 3] + + points = batch_dict['points'] + + batch_size = BN // 6 + depth = torch.zeros(batch_size, img.shape[1], 1, *self.image_size).to(points[0].device) + + for b in range(batch_size): + batch_mask = points[:,0] == b + cur_coords = points[batch_mask][:, 1:4] + cur_img_aug_matrix = img_aug_matrix[b] + cur_lidar_aug_matrix = lidar_aug_matrix[b] + cur_lidar2image = lidar2image[b] + + # inverse aug + cur_coords -= cur_lidar_aug_matrix[:3, 3] + cur_coords = torch.inverse(cur_lidar_aug_matrix[:3, 
:3]).matmul( + cur_coords.transpose(1, 0) + ) + # lidar2image + cur_coords = cur_lidar2image[:, :3, :3].matmul(cur_coords) + cur_coords += cur_lidar2image[:, :3, 3].reshape(-1, 3, 1) + # get 2d coords + dist = cur_coords[:, 2, :] + cur_coords[:, 2, :] = torch.clamp(cur_coords[:, 2, :], 1e-5, 1e5) + cur_coords[:, :2, :] /= cur_coords[:, 2:3, :] + + # do image aug + cur_coords = cur_img_aug_matrix[:, :3, :3].matmul(cur_coords) + cur_coords += cur_img_aug_matrix[:, :3, 3].reshape(-1, 3, 1) + cur_coords = cur_coords[:, :2, :].transpose(1, 2) + + # normalize coords for grid sample + cur_coords = cur_coords[..., [1, 0]] + + # filter points outside of images + on_img = ( + (cur_coords[..., 0] < self.image_size[0]) + & (cur_coords[..., 0] >= 0) + & (cur_coords[..., 1] < self.image_size[1]) + & (cur_coords[..., 1] >= 0) + ) + for c in range(on_img.shape[0]): + masked_coords = cur_coords[c, on_img[c]].long() + masked_dist = dist[c, on_img[c]] + depth[b, c, 0, masked_coords[:, 0], masked_coords[:, 1]] = masked_dist + + extra_rots = lidar_aug_matrix[..., :3, :3] + extra_trans = lidar_aug_matrix[..., :3, 3] + geom = self.get_geometry( + camera2lidar_rots, camera2lidar_trans, intrins, post_rots, + post_trans, extra_rots=extra_rots, extra_trans=extra_trans, + ) + # use points depth to assist the depth prediction in images + x = self.get_cam_feats(img, depth) + x = self.bev_pool(geom, x) + x = self.downsample(x) + # convert bev features from (b, c, x, y) to (b, c, y, x) + x = x.permute(0, 1, 3, 2) + batch_dict['spatial_features_img'] = x + return batch_dict \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/__init__.py b/toolbox/openpcdet/pcdet/ops/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/ops/bev_pool/__init__.py b/toolbox/openpcdet/pcdet/ops/bev_pool/__init__.py new file mode 100644 index 000000000..b60058a92 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/bev_pool/__init__.py @@ -0,0 +1 @@ +from .bev_pool import bev_pool \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/bev_pool/bev_pool.py b/toolbox/openpcdet/pcdet/ops/bev_pool/bev_pool.py new file mode 100644 index 000000000..5769a40a7 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/bev_pool/bev_pool.py @@ -0,0 +1,97 @@ +import torch + +from . 
import bev_pool_ext + +__all__ = ["bev_pool"] + + +class QuickCumsum(torch.autograd.Function): + @staticmethod + def forward(ctx, x, geom_feats, ranks): + x = x.cumsum(0) + kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool) + kept[:-1] = ranks[1:] != ranks[:-1] + + x, geom_feats = x[kept], geom_feats[kept] + x = torch.cat((x[:1], x[1:] - x[:-1])) + + # save kept for backward + ctx.save_for_backward(kept) + + # no gradient for geom_feats + ctx.mark_non_differentiable(geom_feats) + + return x, geom_feats + + @staticmethod + def backward(ctx, gradx, gradgeom): + (kept,) = ctx.saved_tensors + back = torch.cumsum(kept, 0) + back[kept] -= 1 + + val = gradx[back] + + return val, None, None + + +class QuickCumsumCuda(torch.autograd.Function): + @staticmethod + def forward(ctx, x, geom_feats, ranks, B, D, H, W): + kept = torch.ones(x.shape[0], device=x.device, dtype=torch.bool) + kept[1:] = ranks[1:] != ranks[:-1] + interval_starts = torch.where(kept)[0].int() + interval_lengths = torch.zeros_like(interval_starts) + interval_lengths[:-1] = interval_starts[1:] - interval_starts[:-1] + interval_lengths[-1] = x.shape[0] - interval_starts[-1] + geom_feats = geom_feats.int() + + out = bev_pool_ext.bev_pool_forward( + x, + geom_feats, + interval_lengths, + interval_starts, + B, + D, + H, + W, + ) + + ctx.save_for_backward(interval_starts, interval_lengths, geom_feats) + ctx.saved_shapes = B, D, H, W + return out + + @staticmethod + def backward(ctx, out_grad): + interval_starts, interval_lengths, geom_feats = ctx.saved_tensors + B, D, H, W = ctx.saved_shapes + + out_grad = out_grad.contiguous() + x_grad = bev_pool_ext.bev_pool_backward( + out_grad, + geom_feats, + interval_lengths, + interval_starts, + B, + D, + H, + W, + ) + + return x_grad, None, None, None, None, None, None + + +def bev_pool(feats, coords, B, D, H, W): + assert feats.shape[0] == coords.shape[0] + + ranks = ( + coords[:, 0] * (W * D * B) + + coords[:, 1] * (D * B) + + coords[:, 2] * B + + coords[:, 3] + ) + indices = ranks.argsort() + feats, coords, ranks = feats[indices], coords[indices], ranks[indices] + + x = QuickCumsumCuda.apply(feats, coords, ranks, B, D, H, W) + x = x.permute(0, 4, 1, 2, 3).contiguous() + return x diff --git a/toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool.cpp b/toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool.cpp new file mode 100644 index 000000000..c1faf9bed --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool.cpp @@ -0,0 +1,94 @@ +#include <torch/extension.h> +#include <c10/cuda/CUDAGuard.h> + +// CUDA function declarations +void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out); + +void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad); + + +/* + Function: pillar pooling (forward, cuda) + Args: + x : input features, FloatTensor[n, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : how many points in each pooled interval, IntTensor[n_intervals] + interval_starts : starting position of each pooled interval, IntTensor[n_intervals] + Return: + out : output features, FloatTensor[b, d, h, w, c] +*/ +at::Tensor bev_pool_forward( + const at::Tensor _x, + const at::Tensor _geom_feats, + const at::Tensor _interval_lengths, + const at::Tensor _interval_starts, + int b, int d, int h, int w +) { + int n = _x.size(0); + int c = _x.size(1); + int 
n_intervals = _interval_lengths.size(0); + const at::cuda::OptionalCUDAGuard device_guard(device_of(_x)); + const float* x = _x.data_ptr<float>(); + const int* geom_feats = _geom_feats.data_ptr<int>(); + const int* interval_lengths = _interval_lengths.data_ptr<int>(); + const int* interval_starts = _interval_starts.data_ptr<int>(); + + auto options = + torch::TensorOptions().dtype(_x.dtype()).device(_x.device()); + at::Tensor _out = torch::zeros({b, d, h, w, c}, options); + float* out = _out.data_ptr<float>(); + bev_pool( + b, d, h, w, n, c, n_intervals, x, + geom_feats, interval_starts, interval_lengths, out + ); + return _out; +} + + +/* + Function: pillar pooling (backward, cuda) + Args: + out_grad : gradient of output features, FloatTensor[b, d, h, w, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : how many points in each pooled interval, IntTensor[n_intervals] + interval_starts : starting position of each pooled interval, IntTensor[n_intervals] + Return: + x_grad : gradient of input features, FloatTensor[n, c] +*/ +at::Tensor bev_pool_backward( + const at::Tensor _out_grad, + const at::Tensor _geom_feats, + const at::Tensor _interval_lengths, + const at::Tensor _interval_starts, + int b, int d, int h, int w +) { + int n = _geom_feats.size(0); + int c = _out_grad.size(4); + int n_intervals = _interval_lengths.size(0); + const at::cuda::OptionalCUDAGuard device_guard(device_of(_out_grad)); + const float* out_grad = _out_grad.data_ptr<float>(); + const int* geom_feats = _geom_feats.data_ptr<int>(); + const int* interval_lengths = _interval_lengths.data_ptr<int>(); + const int* interval_starts = _interval_starts.data_ptr<int>(); + + auto options = + torch::TensorOptions().dtype(_out_grad.dtype()).device(_out_grad.device()); + at::Tensor _x_grad = torch::zeros({n, c}, options); + float* x_grad = _x_grad.data_ptr<float>(); + + bev_pool_grad( + b, d, h, w, n, c, n_intervals, out_grad, + geom_feats, interval_starts, interval_lengths, x_grad + ); + + return _x_grad; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("bev_pool_forward", &bev_pool_forward, + "bev_pool_forward"); + m.def("bev_pool_backward", &bev_pool_backward, + "bev_pool_backward"); +} diff --git a/toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool_cuda.cu b/toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool_cuda.cu new file mode 100644 index 000000000..af3b85767 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/bev_pool/src/bev_pool_cuda.cu @@ -0,0 +1,98 @@ +#include <stdio.h> +#include <stdlib.h> + +/* + Function: pillar pooling + Args: + b : batch size + d : depth of the feature map + h : height of pooled feature map + w : width of pooled feature map + n : number of input points + c : number of channels + n_intervals : number of unique points + x : input features, FloatTensor[n, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : how many points in each pooled interval, IntTensor[n_intervals] + interval_starts : starting position of each pooled interval, IntTensor[n_intervals] + out : output features, FloatTensor[b, d, h, w, c] +*/ +__global__ void bev_pool_kernel(int b, int d, int h, int w, int n, int c, int n_intervals, + const float *__restrict__ x, + const int *__restrict__ geom_feats, + const int *__restrict__ interval_starts, + const int *__restrict__ interval_lengths, + float* __restrict__ out) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int index = idx / c; + int cur_c = idx % c; + if (index >= n_intervals) return; + int interval_start = interval_starts[index]; + int interval_length = interval_lengths[index]; + const int* cur_geom_feats = geom_feats + interval_start * 4; + 
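+ // one thread per (interval, channel) pair: sum channel cur_c over the interval's points and write the total into the single output voxel addressed by cur_geom_feats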
const float* cur_x = x + interval_start * c + cur_c; + float* cur_out = out + cur_geom_feats[3] * d * h * w * c + + cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c + + cur_geom_feats[1] * c + cur_c; + float psum = 0; + for(int i = 0; i < interval_length; i++){ + psum += cur_x[i * c]; + } + *cur_out = psum; +} + + +/* + Function: pillar pooling backward + Args: + b : batch size + d : depth of the feature map + h : height of pooled feature map + w : width of pooled feature map + n : number of input points + c : number of channels + n_intervals : number of unique points + out_grad : gradient of the BEV fmap from top, FloatTensor[b, d, h, w, c] + geom_feats : input coordinates, IntTensor[n, 4] + interval_lengths : starting position for pooled point, IntTensor[n_intervals] + interval_starts : how many points in each pooled point, IntTensor[n_intervals] + x_grad : gradient of the image fmap, FloatTensor +*/ +__global__ void bev_pool_grad_kernel(int b, int d, int h, int w, int n, int c, int n_intervals, + const float *__restrict__ out_grad, + const int *__restrict__ geom_feats, + const int *__restrict__ interval_starts, + const int *__restrict__ interval_lengths, + float* __restrict__ x_grad) { + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int index = idx / c; + int cur_c = idx % c; + if (index >= n_intervals) return; + int interval_start = interval_starts[index]; + int interval_length = interval_lengths[index]; + + const int* cur_geom_feats = geom_feats + interval_start * 4; + float* cur_x_grad = x_grad + interval_start * c + cur_c; + + const float* cur_out_grad = out_grad + cur_geom_feats[3] * d * h * w * c + + cur_geom_feats[2] * h * w * c + cur_geom_feats[0] * w * c + + cur_geom_feats[1] * c + cur_c; + for(int i = 0; i < interval_length; i++){ + cur_x_grad[i * c] = *cur_out_grad; + } + +} + +void bev_pool(int b, int d, int h, int w, int n, int c, int n_intervals, const float* x, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* out) { + bev_pool_kernel<<<(int)ceil(((float)n_intervals * c / 256)), 256>>>( + b, d, h, w, n, c, n_intervals, x, geom_feats, interval_starts, interval_lengths, out + ); +} + +void bev_pool_grad(int b, int d, int h, int w, int n, int c, int n_intervals, const float* out_grad, + const int* geom_feats, const int* interval_starts, const int* interval_lengths, float* x_grad) { + bev_pool_grad_kernel<<<(int)ceil(((float)n_intervals * c / 256)), 256>>>( + b, d, h, w, n, c, n_intervals, out_grad, geom_feats, interval_starts, interval_lengths, x_grad + ); +} diff --git a/toolbox/openpcdet/pcdet/ops/ingroup_inds/ingroup_inds_op.py b/toolbox/openpcdet/pcdet/ops/ingroup_inds/ingroup_inds_op.py new file mode 100644 index 000000000..5c9b6e0e9 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/ingroup_inds/ingroup_inds_op.py @@ -0,0 +1,31 @@ +import torch + +try: + from . 
import ingroup_inds_cuda + # import ingroup_indices +except ImportError: + ingroup_indices = None + print('Can not import ingroup indices') + +ingroup_indices = ingroup_inds_cuda + +from torch.autograd import Function +class IngroupIndicesFunction(Function): + + @staticmethod + def forward(ctx, group_inds): + + out_inds = torch.zeros_like(group_inds) - 1 + + ingroup_indices.forward(group_inds, out_inds) + + ctx.mark_non_differentiable(out_inds) + + return out_inds + + @staticmethod + def backward(ctx, g): + + return None + +ingroup_inds = IngroupIndicesFunction.apply \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/error.cuh b/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/error.cuh new file mode 100644 index 000000000..2dd5a8775 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/error.cuh @@ -0,0 +1,18 @@ +#pragma once +#include + +#define CHECK_CALL(call) \ +do \ +{ \ + const cudaError_t error_code = call; \ + if (error_code != cudaSuccess) \ + { \ + printf("CUDA Error:\n"); \ + printf(" File: %s\n", __FILE__); \ + printf(" Line: %d\n", __LINE__); \ + printf(" Error code: %d\n", error_code); \ + printf(" Error text: %s\n", \ + cudaGetErrorString(error_code)); \ + exit(1); \ + } \ +} while (0) \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds.cpp b/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds.cpp new file mode 100644 index 000000000..8bd3389eb --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds.cpp @@ -0,0 +1,54 @@ +#include +#include +#include +#include + +#define CHECK_CUDA(x) \ + TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + + +void ingroup_inds_launcher( + const long *group_inds_data, + long *out_inds_data, + int N, + int max_group_id +); + + +void ingroup_inds_gpu( + at::Tensor group_inds, + at::Tensor out_inds +); + +void ingroup_inds_gpu( + at::Tensor group_inds, + at::Tensor out_inds +) { + + CHECK_INPUT(group_inds); + CHECK_INPUT(out_inds); + int N = group_inds.size(0); + int max_group_id = group_inds.max().item().toLong(); + + + long *group_inds_data = group_inds.data_ptr(); + long *out_inds_data = out_inds.data_ptr(); + + ingroup_inds_launcher( + group_inds_data, + out_inds_data, + N, + max_group_id + ); + +} + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &ingroup_inds_gpu, "cuda version of get_inner_win_inds of SST"); +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds_kernel.cu b/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds_kernel.cu new file mode 100644 index 000000000..788284813 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/ingroup_inds/src/ingroup_inds_kernel.cu @@ -0,0 +1,77 @@ +#include +#include +#include +#include +#include +#include +#include "cuda_fp16.h" + +#define CHECK_CALL(call) \ +do \ +{ \ + const cudaError_t error_code = call; \ + if (error_code != cudaSuccess) \ + { \ + printf("CUDA Error:\n"); \ + printf(" File: %s\n", __FILE__); \ + printf(" Line: %d\n", __LINE__); \ + printf(" Error code: %d\n", error_code); \ + printf(" Error text: %s\n", \ + cudaGetErrorString(error_code)); \ + exit(1); \ + } \ +} while (0) + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +// #define DEBUG +// #define ASSERTION + +__global__ void 
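+// In-group indexing (worked example): each thread atomically increments a
+// per-group counter and records the previous value, so every element receives a
+// slot number that is unique within its group. If threads happened to run in
+// input order, group_inds = [0, 0, 1, 0, 2, 2] would produce
+// out_inds = [0, 1, 0, 2, 0, 1]; only uniqueness, not ordering, is guaranteed.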
ingroup_inds_kernel( + const long *group_inds, + long *out_inds, + int *ingroup_counter, + int N +) { + + int idx = blockIdx.x * blockDim.x + threadIdx.x; + if (idx >= N) return; + long this_group_id = group_inds[idx]; + + int cnt = atomicAdd(&ingroup_counter[this_group_id], 1); + out_inds[idx] = cnt; +} + + + void ingroup_inds_launcher( + const long *group_inds, + long *out_inds, + int N, + int max_group_id + ) { + + int *ingroup_counter = NULL; + CHECK_CALL(cudaMalloc(&ingroup_counter, (max_group_id + 1) * sizeof(int))); + CHECK_CALL(cudaMemset(ingroup_counter, 0, (max_group_id + 1) * sizeof(int))); + + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); + dim3 threads(THREADS_PER_BLOCK); + + ingroup_inds_kernel<<>>( + group_inds, + out_inds, + ingroup_counter, + N + ); + + cudaFree(ingroup_counter); + + #ifdef DEBUG + CHECK_CALL(cudaGetLastError()); + CHECK_CALL(cudaDeviceSynchronize()); + #endif + + return; + +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/__init__.py b/toolbox/openpcdet/pcdet/ops/iou3d_nms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/iou3d_nms_utils.py b/toolbox/openpcdet/pcdet/ops/iou3d_nms/iou3d_nms_utils.py new file mode 100644 index 000000000..b63ca0d93 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/iou3d_nms_utils.py @@ -0,0 +1,189 @@ +""" +3D IoU Calculation and Rotated NMS +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +""" +import torch + +from ...utils import common_utils +from . import iou3d_nms_cuda + + +def boxes_bev_iou_cpu(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N, M) + """ + boxes_a, is_numpy = common_utils.check_numpy_to_torch(boxes_a) + boxes_b, is_numpy = common_utils.check_numpy_to_torch(boxes_b) + assert not (boxes_a.is_cuda or boxes_b.is_cuda), 'Only support CPU tensors' + assert boxes_a.shape[1] == 7 and boxes_b.shape[1] == 7 + ans_iou = boxes_a.new_zeros(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))) + iou3d_nms_cuda.boxes_iou_bev_cpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou) + + return ans_iou.numpy() if is_numpy else ans_iou + + +def boxes_iou_bev(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N, M) + """ + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + ans_iou = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() + + iou3d_nms_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), ans_iou) + + return ans_iou + + +def boxes_iou3d_gpu(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N, M) + """ + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + + # height overlap + boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1) + boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1) + boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(1, -1) + boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(1, -1) + + # bev overlap + overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], boxes_b.shape[0]))).zero_() # (N, M) + iou3d_nms_cuda.boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev) + + max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) + min_of_max = 
torch.min(boxes_a_height_max, boxes_b_height_max) + overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) + + # 3d iou + overlaps_3d = overlaps_bev * overlaps_h + + vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1) + vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(1, -1) + + iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6) + + return iou3d + +def boxes_aligned_iou3d_gpu(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N,) + """ + assert boxes_a.shape[0] == boxes_b.shape[0] + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + + # height overlap + boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1) + boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1) + boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(-1, 1) + boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(-1, 1) + + # bev overlap + overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], 1))).zero_() # (N, M) + iou3d_nms_cuda.boxes_aligned_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev) + + max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) + min_of_max = torch.min(boxes_a_height_max, boxes_b_height_max) + overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) + + # 3d iou + overlaps_3d = overlaps_bev * overlaps_h + + vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1) + vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(-1, 1) + + iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6) + + return iou3d + + +def nms_gpu(boxes, scores, thresh, pre_maxsize=None, **kwargs): + """ + :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + :param scores: (N) + :param thresh: + :return: + """ + assert boxes.shape[1] == 7 + order = scores.sort(0, descending=True)[1] + if pre_maxsize is not None: + order = order[:pre_maxsize] + + boxes = boxes[order].contiguous() + keep = torch.LongTensor(boxes.size(0)) + num_out = iou3d_nms_cuda.nms_gpu(boxes, keep, thresh) + return order[keep[:num_out].cuda()].contiguous(), None + + +def nms_normal_gpu(boxes, scores, thresh, **kwargs): + """ + :param boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + :param scores: (N) + :param thresh: + :return: + """ + assert boxes.shape[1] == 7 + order = scores.sort(0, descending=True)[1] + + boxes = boxes[order].contiguous() + + keep = torch.LongTensor(boxes.size(0)) + num_out = iou3d_nms_cuda.nms_normal_gpu(boxes, keep, thresh) + return order[keep[:num_out].cuda()].contiguous(), None + + +def paired_boxes_iou3d_gpu(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + ans_iou: (N) + """ + assert boxes_a.shape[0] == boxes_b.shape[0] + assert boxes_a.shape[1] == boxes_b.shape[1] == 7 + + # height overlap + boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).view(-1, 1) + boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).view(-1, 1) + boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).view(-1, 1) + boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).view(-1, 1) + + # bev overlap + overlaps_bev = torch.cuda.FloatTensor(torch.Size((boxes_a.shape[0], 1))).zero_() # (N, ``) + iou3d_nms_cuda.paired_boxes_overlap_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(), overlaps_bev) + + max_of_min = torch.max(boxes_a_height_min, boxes_b_height_min) + min_of_max = 
torch.min(boxes_a_height_max, boxes_b_height_max) + overlaps_h = torch.clamp(min_of_max - max_of_min, min=0) + + # 3d iou + overlaps_3d = overlaps_bev * overlaps_h + + vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).view(-1, 1) + vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).view(-1, 1) + + iou3d = overlaps_3d / torch.clamp(vol_a + vol_b - overlaps_3d, min=1e-6) + + return iou3d.view(-1) \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp new file mode 100644 index 000000000..c272a580b --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.cpp @@ -0,0 +1,273 @@ +/* +3D Rotated IoU Calculation (CPU) +Written by Shaoshuai Shi +All Rights Reserved 2020. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include "iou3d_cpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + +inline float min(float a, float b){ + return a > b ? b : a; +} + +inline float max(float a, float b){ + return a > b ? a : b; +} + +const float EPS = 1e-8; +struct Point { + float x, y; + __device__ Point() {} + __device__ Point(float _x, float _y){ + x = _x, y = _y; + } + + __device__ void set(float _x, float _y){ + x = _x; y = _y; + } + + __device__ Point operator +(const Point &b)const{ + return Point(x + b.x, y + b.y); + } + + __device__ Point operator -(const Point &b)const{ + return Point(x - b.x, y - b.y); + } +}; + +inline float cross(const Point &a, const Point &b){ + return a.x * b.y - a.y * b.x; +} + +inline float cross(const Point &p1, const Point &p2, const Point &p0){ + return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); +} + +inline int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ + int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && + min(q1.x,q2.x) <= max(p1.x,p2.x) && + min(p1.y,p2.y) <= max(q1.y,q2.y) && + min(q1.y,q2.y) <= max(p1.y,p2.y); + return ret; +} + +inline int check_in_box2d(const float *box, const Point &p){ + //params: (7) [x, y, z, dx, dy, dz, heading] + const float MARGIN = 1e-2; + + float center_x = box[0], center_y = box[1]; + float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box + float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); + float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; + + return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); +} + +inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ + // fast exclusion + if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; + + // check cross standing + float s1 = cross(q0, p1, p0); + float s2 = cross(p1, q1, p0); + float s3 = cross(p0, q1, q0); + float s4 = cross(q1, p1, q0); + + if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; + + // calculate intersection of two lines + float s5 = cross(q1, p1, p0); + if(fabs(s5 - s1) > EPS){ + ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); + ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); + + } + else{ + float a0 = p0.y - p1.y, b0 = p1.x - p0.x, 
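+    // Near-parallel fallback (sketch): each segment is rewritten in implicit form
+    // a*x + b*y + c = 0 and the crossing point solves the 2x2 system by Cramer's
+    // rule, e.g. ans.x = (b0*c1 - b1*c0) / (a0*b1 - a1*b0).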
c0 = p0.x * p1.y - p1.x * p0.y; + float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; + float D = a0 * b1 - a1 * b0; + + ans.x = (b0 * c1 - b1 * c0) / D; + ans.y = (a1 * c0 - a0 * c1) / D; + } + + return 1; +} + +inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){ + float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; + float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; + p.set(new_x, new_y); +} + +inline int point_cmp(const Point &a, const Point &b, const Point ¢er){ + return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); +} + +inline float box_overlap(const float *box_a, const float *box_b){ + // params: box_a (7) [x, y, z, dx, dy, dz, heading] + // params: box_b (7) [x, y, z, dx, dy, dz, heading] + +// float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = box_a[4]; +// float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = box_b[4]; + float a_angle = box_a[6], b_angle = box_b[6]; + float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; + float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; + float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; + float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; + float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; + + Point center_a(box_a[0], box_a[1]); + Point center_b(box_b[0], box_b[1]); + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++){ + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++){ + for (int j = 0; j < 4; j++){ + flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); + if (flag){ + poly_center = poly_center + cross_points[cnt]; + cnt++; + } + } + } + + // check corners + for (int k = 0; k < 4; k++){ + if (check_in_box2d(box_a, box_b_corners[k])){ + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; + cnt++; + } + if (check_in_box2d(box_b, box_a_corners[k])){ + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++){ + for (int i = 0; i < cnt - j - 1; i++){ + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ + temp = cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++){ + 
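+        // Shoelace sum over a triangle fan (illustrative): each cross() term is
+        // twice the signed area of triangle (p0, pk, pk+1); for a unit square
+        // visited CCW the terms sum to 2, and fabs(area) / 2.0 returns 1.0.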
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +inline float iou_bev(const float *box_a, const float *box_b){ + // params: box_a (7) [x, y, z, dx, dy, dz, heading] + // params: box_b (7) [x, y, z, dx, dy, dz, heading] + float sa = box_a[3] * box_a[4]; + float sb = box_b[3] * box_b[4]; + float s_overlap = box_overlap(box_a, box_b); + return s_overlap / fmaxf(sa + sb - s_overlap, EPS); +} + + +int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor){ + // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b_tensor: (M, 7) [x, y, z, dx, dy, dz, heading] + // params ans_iou_tensor: (N, M) + + CHECK_CONTIGUOUS(boxes_a_tensor); + CHECK_CONTIGUOUS(boxes_b_tensor); + + int num_boxes_a = boxes_a_tensor.size(0); + int num_boxes_b = boxes_b_tensor.size(0); + const float *boxes_a = boxes_a_tensor.data(); + const float *boxes_b = boxes_b_tensor.data(); + float *ans_iou = ans_iou_tensor.data(); + + for (int i = 0; i < num_boxes_a; i++){ + for (int j = 0; j < num_boxes_b; j++){ + ans_iou[i * num_boxes_b + j] = iou_bev(boxes_a + i * 7, boxes_b + j * 7); + } + } + return 1; +} + +int boxes_aligned_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor){ + // params boxes_a_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b_tensor: (N, 7) [x, y, z, dx, dy, dz, heading] + // params ans_iou_tensor: (N, 1) + + CHECK_CONTIGUOUS(boxes_a_tensor); + CHECK_CONTIGUOUS(boxes_b_tensor); + + int num_boxes = boxes_a_tensor.size(0); + int num_boxes_b = boxes_b_tensor.size(0); + assert(num_boxes == num_boxes_b); + const float *boxes_a = boxes_a_tensor.data(); + const float *boxes_b = boxes_b_tensor.data(); + float *ans_iou = ans_iou_tensor.data(); + + for (int i = 0; i < num_boxes; i++){ + ans_iou[i] = iou_bev(boxes_a + i * 7, boxes_b + i * 7); + } + return 1; +} diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.h b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.h new file mode 100644 index 000000000..4d93bb6e3 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_cpu.h @@ -0,0 +1,11 @@ +#ifndef IOU3D_CPU_H +#define IOU3D_CPU_H + +#include +#include +#include +#include + +int boxes_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); +int boxes_aligned_iou_bev_cpu(at::Tensor boxes_a_tensor, at::Tensor boxes_b_tensor, at::Tensor ans_iou_tensor); +#endif diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.cpp b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.cpp new file mode 100644 index 000000000..179a26cf6 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.cpp @@ -0,0 +1,235 @@ +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
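+
+Overview of the rotated NMS in this file (summary): the GPU kernel fills a bitmask
+of shape (N, ceil(N / 64)); bit j of row i is set when IoU(box_i, box_j) exceeds
+the threshold. The host then scans boxes in descending-score order, keeping a box
+only if no previously kept box has set its bit; boxes_num = 100 gives
+col_blocks = 2, i.e. two 64-bit suppression words per box. Python-side call
+(sketch, mirroring nms_gpu in iou3d_nms_utils.py):
+    keep = torch.LongTensor(boxes.size(0))
+    num_out = iou3d_nms_cuda.nms_gpu(boxes_sorted, keep, thresh)
+    selected = order[keep[:num_out].cuda()]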
+*/ + +#include +#include +#include +#include +#include +#include "iou3d_nms.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + +#define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); } +inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) +{ + if (code != cudaSuccess) + { + fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); + if (abort) exit(code); + } +} + +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; + +void boxesalignedoverlapLauncher(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap); +void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap); +void PairedBoxesOverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap); +void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou); +void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh); +void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh); + + +int boxes_aligned_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + // params ans_overlap: (N, 1) + + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_overlap); + + int num_box = boxes_a.size(0); + int num_b = boxes_b.size(0); + + assert(num_box == num_b); + + const float * boxes_a_data = boxes_a.data(); + const float * boxes_b_data = boxes_b.data(); + float * ans_overlap_data = ans_overlap.data(); + + boxesalignedoverlapLauncher(num_box, boxes_a_data, boxes_b_data, ans_overlap_data); + + return 1; +} + +int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + // params ans_overlap: (N, M) + + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_overlap); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + const float * boxes_a_data = boxes_a.data(); + const float * boxes_b_data = boxes_b.data(); + float * ans_overlap_data = ans_overlap.data(); + + boxesoverlapLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_overlap_data); + + return 1; +} + +int paired_boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + // params ans_overlap: (N, 1) + + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_overlap); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + assert(num_a == num_b); + + const float * boxes_a_data = boxes_a.data(); + const float * boxes_b_data = boxes_b.data(); + float * ans_overlap_data = ans_overlap.data(); + + 
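+  // One thread per box pair (illustrative): the paired launcher uses a 1-D grid
+  // of DIVUP(num_a, THREADS_PER_BLOCK) blocks, e.g. 4 blocks of 16 threads for
+  // num_a = 50; thread i writes overlap(boxes_a[i], boxes_b[i]) into ans_overlap[i].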
PairedBoxesOverlapLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_overlap_data); + + return 1; +} + +int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + // params ans_overlap: (N, M) + CHECK_INPUT(boxes_a); + CHECK_INPUT(boxes_b); + CHECK_INPUT(ans_iou); + + int num_a = boxes_a.size(0); + int num_b = boxes_b.size(0); + + const float * boxes_a_data = boxes_a.data(); + const float * boxes_b_data = boxes_b.data(); + float * ans_iou_data = ans_iou.data(); + + boxesioubevLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data); + + return 1; +} + +int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh){ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + // params keep: (N) + CHECK_INPUT(boxes); + CHECK_CONTIGUOUS(keep); + + int boxes_num = boxes.size(0); + const float * boxes_data = boxes.data(); + long * keep_data = keep.data(); + + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + + unsigned long long *mask_data = NULL; + CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long))); + nmsLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); + + // unsigned long long mask_cpu[boxes_num * col_blocks]; + // unsigned long long *mask_cpu = new unsigned long long [boxes_num * col_blocks]; + std::vector mask_cpu(boxes_num * col_blocks); + +// printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); + CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * sizeof(unsigned long long), + cudaMemcpyDeviceToHost)); + + cudaFree(mask_data); + + unsigned long long remv_cpu[col_blocks]; + memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); + + int num_to_keep = 0; + + for (int i = 0; i < boxes_num; i++){ + int nblock = i / THREADS_PER_BLOCK_NMS; + int inblock = i % THREADS_PER_BLOCK_NMS; + + if (!(remv_cpu[nblock] & (1ULL << inblock))){ + keep_data[num_to_keep++] = i; + unsigned long long *p = &mask_cpu[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++){ + remv_cpu[j] |= p[j]; + } + } + } + if ( cudaSuccess != cudaGetLastError() ) printf( "Error!\n" ); + + return num_to_keep; +} + + +int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh){ + // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading] + // params keep: (N) + + CHECK_INPUT(boxes); + CHECK_CONTIGUOUS(keep); + + int boxes_num = boxes.size(0); + const float * boxes_data = boxes.data(); + long * keep_data = keep.data(); + + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + + unsigned long long *mask_data = NULL; + CHECK_ERROR(cudaMalloc((void**)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long))); + nmsNormalLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh); + + // unsigned long long mask_cpu[boxes_num * col_blocks]; + // unsigned long long *mask_cpu = new unsigned long long [boxes_num * col_blocks]; + std::vector mask_cpu(boxes_num * col_blocks); + +// printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks); + CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data, boxes_num * col_blocks * sizeof(unsigned long long), + cudaMemcpyDeviceToHost)); + + cudaFree(mask_data); + + unsigned long long remv_cpu[col_blocks]; + memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long)); + + int num_to_keep = 0; + + for (int i = 0; i < boxes_num; i++){ + int nblock = i / THREADS_PER_BLOCK_NMS; + int 
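+    // Greedy suppression scan (worked example): if box 0 is kept and its mask row
+    // has bits 3 and 7 set, those bits are OR-ed into remv_cpu, so i = 3 and
+    // i = 7 are skipped later; each box whose bit is still clear is kept and
+    // OR-s its own mask row in turn.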
inblock = i % THREADS_PER_BLOCK_NMS; + + if (!(remv_cpu[nblock] & (1ULL << inblock))){ + keep_data[num_to_keep++] = i; + unsigned long long *p = &mask_cpu[0] + i * col_blocks; + for (int j = nblock; j < col_blocks; j++){ + remv_cpu[j] |= p[j]; + } + } + } + if ( cudaSuccess != cudaGetLastError() ) printf( "Error!\n" ); + + return num_to_keep; +} + + diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.h b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.h new file mode 100644 index 000000000..320202758 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms.h @@ -0,0 +1,17 @@ +#ifndef IOU3D_NMS_H +#define IOU3D_NMS_H + +#include +#include +#include +#include +#include + +int boxes_aligned_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); +int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); +int paired_boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_overlap); +int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b, at::Tensor ans_iou); +int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); +int nms_normal_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp new file mode 100644 index 000000000..972b55b5b --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_api.cpp @@ -0,0 +1,20 @@ +#include +#include +#include +#include +#include + +#include "iou3d_cpu.h" +#include "iou3d_nms.h" + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("boxes_aligned_overlap_bev_gpu", &boxes_aligned_overlap_bev_gpu, "aligned oriented boxes overlap"); + m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu, "oriented boxes overlap"); + m.def("paired_boxes_overlap_bev_gpu", &paired_boxes_overlap_bev_gpu, "oriented boxes overlap"); + m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou"); + m.def("nms_gpu", &nms_gpu, "oriented nms gpu"); + m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu"); + m.def("boxes_aligned_iou_bev_cpu", &boxes_aligned_iou_bev_cpu, "aligned oriented boxes iou"); + m.def("boxes_iou_bev_cpu", &boxes_iou_bev_cpu, "oriented boxes iou"); +} diff --git a/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu new file mode 100644 index 000000000..23bdcbfa4 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/iou3d_nms/src/iou3d_nms_kernel.cu @@ -0,0 +1,464 @@ +/* +3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others) +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
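+
+Kernel layout (summary): THREADS_PER_BLOCK_NMS equals sizeof(unsigned long long) * 8
+= 64, so the NMS grid is ceil(N / 64) x ceil(N / 64) tiles; each block stages 64
+"column" boxes in shared memory and each thread compares one "row" box against all
+of them, emitting a single 64-bit suppression word per (row box, column tile).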
+*/ + + +#include +#define THREADS_PER_BLOCK 16 +#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0)) + +// #define DEBUG +const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8; +const float EPS = 1e-8; +struct Point { + float x, y; + __device__ Point() {} + __device__ Point(float _x, float _y){ + x = _x, y = _y; + } + + __device__ void set(float _x, float _y){ + x = _x; y = _y; + } + + __device__ Point operator +(const Point &b)const{ + return Point(x + b.x, y + b.y); + } + + __device__ Point operator -(const Point &b)const{ + return Point(x - b.x, y - b.y); + } +}; + +__device__ inline float cross(const Point &a, const Point &b){ + return a.x * b.y - a.y * b.x; +} + +__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){ + return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y); +} + +__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){ + int ret = min(p1.x,p2.x) <= max(q1.x,q2.x) && + min(q1.x,q2.x) <= max(p1.x,p2.x) && + min(p1.y,p2.y) <= max(q1.y,q2.y) && + min(q1.y,q2.y) <= max(p1.y,p2.y); + return ret; +} + +__device__ inline int check_in_box2d(const float *box, const Point &p){ + //params: (7) [x, y, z, dx, dy, dz, heading] + const float MARGIN = 1e-2; + + float center_x = box[0], center_y = box[1]; + float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]); // rotate the point in the opposite direction of box + float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin); + float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos; + + return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN); +} + +__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){ + // fast exclusion + if (check_rect_cross(p0, p1, q0, q1) == 0) return 0; + + // check cross standing + float s1 = cross(q0, p1, p0); + float s2 = cross(p1, q1, p0); + float s3 = cross(p0, q1, q0); + float s4 = cross(q1, p1, q0); + + if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0; + + // calculate intersection of two lines + float s5 = cross(q1, p1, p0); + if(fabs(s5 - s1) > EPS){ + ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1); + ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1); + + } + else{ + float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y; + float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y; + float D = a0 * b1 - a1 * b0; + + ans.x = (b0 * c1 - b1 * c0) / D; + ans.y = (a1 * c0 - a0 * c1) / D; + } + + return 1; +} + +__device__ inline void rotate_around_center(const Point ¢er, const float angle_cos, const float angle_sin, Point &p){ + float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x; + float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y; + p.set(new_x, new_y); +} + +__device__ inline int point_cmp(const Point &a, const Point &b, const Point ¢er){ + return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x); +} + +__device__ inline float box_overlap(const float *box_a, const float *box_b){ + // params box_a: [x, y, z, dx, dy, dz, heading] + // params box_b: [x, y, z, dx, dy, dz, heading] + + float a_angle = box_a[6], b_angle = box_b[6]; + float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2; + float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half; + float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half; + float b_x1 
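+  // Corner setup (illustrative): the corners are first laid out axis-aligned
+  // around each center and then rotated by the box heading via
+  // rotate_around_center(); a heading of pi/2 maps the corner offset
+  // (+dx/2, +dy/2) to (-dy/2, +dx/2) relative to the center.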
= box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half; + float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half; + + Point center_a(box_a[0], box_a[1]); + Point center_b(box_b[0], box_b[1]); + +#ifdef DEBUG + printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle, + b_x1, b_y1, b_x2, b_y2, b_angle); + printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y); +#endif + + Point box_a_corners[5]; + box_a_corners[0].set(a_x1, a_y1); + box_a_corners[1].set(a_x2, a_y1); + box_a_corners[2].set(a_x2, a_y2); + box_a_corners[3].set(a_x1, a_y2); + + Point box_b_corners[5]; + box_b_corners[0].set(b_x1, b_y1); + box_b_corners[1].set(b_x2, b_y1); + box_b_corners[2].set(b_x2, b_y2); + box_b_corners[3].set(b_x1, b_y2); + + // get oriented corners + float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle); + float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle); + + for (int k = 0; k < 4; k++){ +#ifdef DEBUG + printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); +#endif + rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]); + rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]); +#ifdef DEBUG + printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y); +#endif + } + + box_a_corners[4] = box_a_corners[0]; + box_b_corners[4] = box_b_corners[0]; + + // get intersection of lines + Point cross_points[16]; + Point poly_center; + int cnt = 0, flag = 0; + + poly_center.set(0, 0); + for (int i = 0; i < 4; i++){ + for (int j = 0; j < 4; j++){ + flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]); + if (flag){ + poly_center = poly_center + cross_points[cnt]; + cnt++; +#ifdef DEBUG + printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n", + cross_points[cnt - 1].x, cross_points[cnt - 1].y, + box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y, + box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y); +#endif + } + } + } + + // check corners + for (int k = 0; k < 4; k++){ + if (check_in_box2d(box_a, box_b_corners[k])){ + poly_center = poly_center + box_b_corners[k]; + cross_points[cnt] = box_b_corners[k]; + cnt++; +#ifdef DEBUG + printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); +#endif + } + if (check_in_box2d(box_b, box_a_corners[k])){ + poly_center = poly_center + box_a_corners[k]; + cross_points[cnt] = box_a_corners[k]; + cnt++; +#ifdef DEBUG + printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y); +#endif + } + } + + poly_center.x /= cnt; + poly_center.y /= cnt; + + // sort the points of polygon + Point temp; + for (int j = 0; j < cnt - 1; j++){ + for (int i = 0; i < cnt - j - 1; i++){ + if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){ + temp = cross_points[i]; + cross_points[i] = cross_points[i + 1]; + cross_points[i + 1] = temp; + } + } + } + +#ifdef DEBUG + printf("cnt=%d\n", cnt); + for (int i = 0; i < cnt; i++){ + printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y); + } +#endif + + // get the overlap areas + float area = 0; + for (int k = 0; k < cnt - 1; k++){ + area += 
cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]); + } + + return fabs(area) / 2.0; +} + +__device__ inline float iou_bev(const float *box_a, const float *box_b){ + // params box_a: [x, y, z, dx, dy, dz, heading] + // params box_b: [x, y, z, dx, dy, dz, heading] + float sa = box_a[3] * box_a[4]; + float sb = box_b[3] * box_b[4]; + float s_overlap = box_overlap(box_a, box_b); + return s_overlap / fmaxf(sa + sb - s_overlap, EPS); +} + +__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b){ + return; + } + const float * cur_box_a = boxes_a + a_idx * 7; + const float * cur_box_b = boxes_b + b_idx * 7; + float s_overlap = box_overlap(cur_box_a, cur_box_b); + ans_overlap[a_idx * num_b + b_idx] = s_overlap; +} + +__global__ void paired_boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + const int idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (idx >= num_a){ + return; + } + const float * cur_box_a = boxes_a + idx * 7; + const float * cur_box_b = boxes_b + idx * 7; + float s_overlap = box_overlap(cur_box_a, cur_box_b); + // printf("idx=%d, box_a=(%.3f, %.3f, %.3f, ), box_b=(%.3f, %.3f, %.3f, ), overlap=%.5f\n", idx, cur_box_a[0], cur_box_a[1], cur_box_a[2], cur_box_b[0], cur_box_b[1], cur_box_b[2], s_overlap); + ans_overlap[idx] = s_overlap; +} + +__global__ void boxes_aligned_overlap_kernel(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + const int idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + if (idx >= num_box){ + return; + } + const float * cur_box_a = boxes_a + idx * 7; + const float * cur_box_b = boxes_b + idx * 7; + float s_overlap = box_overlap(cur_box_a, cur_box_b); + ans_overlap[idx] = s_overlap; +} + +__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ + // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading] + const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y; + const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x; + + if (a_idx >= num_a || b_idx >= num_b){ + return; + } + + const float * cur_box_a = boxes_a + a_idx * 7; + const float * cur_box_b = boxes_b + b_idx * 7; + float cur_iou_bev = iou_bev(cur_box_a, cur_box_b); + ans_iou[a_idx * num_b + b_idx] = cur_iou_bev; +} + +__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh, + const float *boxes, unsigned long long *mask){ + //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] + //params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * 
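+  // Edge-tile clamp (illustrative): with boxes_num = 100 the grid is 2 x 2 tiles
+  // of 64 boxes; the last tile covers only 100 - 64 = 36 boxes, which is what
+  // these fminf() calls clamp row_size / col_size to.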
THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; + block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; + block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; + block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; + block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; + block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; + block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float *cur_box = boxes + cur_box_idx * 7; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + + +__device__ inline float iou_normal(float const * const a, float const * const b) { + //params: a: [x, y, z, dx, dy, dz, heading] + //params: b: [x, y, z, dx, dy, dz, heading] + + float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2); + float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2); + float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f); + float interS = width * height; + float Sa = a[3] * a[4]; + float Sb = b[3] * b[4]; + return interS / fmaxf(Sa + Sb - interS, EPS); +} + + +__global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh, + const float *boxes, unsigned long long *mask){ + //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading] + //params: mask (N, N/THREADS_PER_BLOCK_NMS) + + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + // if (row_start > col_start) return; + + const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS); + + __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7]; + + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0]; + block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1]; + block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2]; + block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3]; + block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4]; + block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5]; + block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x; + const float 
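+    // Note: nms_normal_kernel mirrors nms_kernel but scores pairs with
+    // iou_normal(), the axis-aligned BEV IoU that ignores heading; e.g. two unit
+    // squares offset by 0.5 in x give 0.5 / (2 - 0.5) = 1/3.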
*cur_box = boxes + cur_box_idx * 7; + + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){ + t |= 1ULL << i; + } + } + const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS); + mask[cur_box_idx * col_blocks + col_start] = t; + } +} + + + + + +void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ + + dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); + + boxes_overlap_kernel<<>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +void PairedBoxesOverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){ + + dim3 blocks(DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + paired_boxes_overlap_kernel<<>>(num_a, boxes_a, num_b, boxes_b, ans_overlap); +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +void boxesalignedoverlapLauncher(const int num_box, const float *boxes_a, const float *boxes_b, float *ans_overlap){ + + dim3 blocks(DIVUP(num_box, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + boxes_aligned_overlap_kernel<<>>(num_box, boxes_a, boxes_b, ans_overlap); +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + +void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){ + + dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); + + boxes_iou_bev_kernel<<>>(num_a, boxes_a, num_b, boxes_b, ans_iou); +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + + +void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ + dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), + DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); + dim3 threads(THREADS_PER_BLOCK_NMS); + nms_kernel<<>>(boxes_num, nms_overlap_thresh, boxes, mask); +} + + +void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){ + dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS), + DIVUP(boxes_num, THREADS_PER_BLOCK_NMS)); + dim3 threads(THREADS_PER_BLOCK_NMS); + nms_normal_kernel<<>>(boxes_num, nms_overlap_thresh, boxes, mask); +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/__init__.py b/toolbox/openpcdet/pcdet/ops/pointnet2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/__init__.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py new file mode 100644 index 000000000..781a1726d --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_modules.py @@ -0,0 +1,174 @@ +from typing import List + +import 
torch +import torch.nn as nn +import torch.nn.functional as F + +from . import pointnet2_utils + + +class _PointnetSAModuleBase(nn.Module): + + def __init__(self): + super().__init__() + self.npoint = None + self.groupers = None + self.mlps = None + self.pool_method = 'max_pool' + + def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor): + """ + :param xyz: (B, N, 3) tensor of the xyz coordinates of the features + :param features: (B, N, C) tensor of the descriptors of the the features + :param new_xyz: + :return: + new_xyz: (B, npoint, 3) tensor of the new features' xyz + new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + if new_xyz is None: + new_xyz = pointnet2_utils.gather_operation( + xyz_flipped, + pointnet2_utils.farthest_point_sample(xyz, self.npoint) + ).transpose(1, 2).contiguous() if self.npoint is not None else None + + for i in range(len(self.groupers)): + new_features = self.groupers[i](xyz, new_xyz, features) # (B, C, npoint, nsample) + + new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample) + if self.pool_method == 'max_pool': + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + elif self.pool_method == 'avg_pool': + new_features = F.avg_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ) # (B, mlp[-1], npoint, 1) + else: + raise NotImplementedError + + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1) + + +class PointnetSAModuleMSG(_PointnetSAModuleBase): + """Pointnet set abstraction layer with multiscale grouping""" + + def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True, + use_xyz: bool = True, pool_method='max_pool'): + """ + :param npoint: int + :param radii: list of float, list of radii to group with + :param nsamples: list of int, number of samples in each ball query + :param mlps: list of list of int, spec of the pointnet before the global pooling for each scale + :param bn: whether to use batchnorm + :param use_xyz: + :param pool_method: max_pool / avg_pool + """ + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.npoint = npoint + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz) + if npoint is not None else pointnet2_utils.GroupAll(use_xyz) + ) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + shared_mlps = [] + for k in range(len(mlp_spec) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp_spec[k + 1]), + nn.ReLU() + ]) + self.mlps.append(nn.Sequential(*shared_mlps)) + + self.pool_method = pool_method + + +class PointnetSAModule(PointnetSAModuleMSG): + """Pointnet set abstraction layer""" + + def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None, + bn: bool = True, use_xyz: bool = True, pool_method='max_pool'): + """ + :param mlp: list of int, spec of the pointnet before the global max_pool + :param npoint: int, number of features + :param radius: float, radius of ball + :param nsample: int, number 
of samples in the ball query + :param bn: whether to use batchnorm + :param use_xyz: + :param pool_method: max_pool / avg_pool + """ + super().__init__( + mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz, + pool_method=pool_method + ) + + +class PointnetFPModule(nn.Module): + r"""Propigates the features of one set to another""" + + def __init__(self, *, mlp: List[int], bn: bool = True): + """ + :param mlp: list of int + :param bn: whether to use batchnorm + """ + super().__init__() + + shared_mlps = [] + for k in range(len(mlp) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp[k + 1]), + nn.ReLU() + ]) + self.mlp = nn.Sequential(*shared_mlps) + + def forward( + self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor + ) -> torch.Tensor: + """ + :param unknown: (B, n, 3) tensor of the xyz positions of the unknown features + :param known: (B, m, 3) tensor of the xyz positions of the known features + :param unknow_feats: (B, C1, n) tensor of the features to be propigated to + :param known_feats: (B, C2, m) tensor of features to be propigated + :return: + new_features: (B, mlp[-1], n) tensor of the features of the unknown features + """ + if known is not None: + dist, idx = pointnet2_utils.three_nn(unknown, known) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=2, keepdim=True) + weight = dist_recip / norm + + interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight) + else: + interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1)) + + if unknow_feats is not None: + new_features = torch.cat([interpolated_feats, unknow_feats], dim=1) # (B, C2 + C1, n) + else: + new_features = interpolated_feats + + new_features = new_features.unsqueeze(-1) + new_features = self.mlp(new_features) + + return new_features.squeeze(-1) + + +if __name__ == "__main__": + pass diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py new file mode 100644 index 000000000..c57afe15c --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/pointnet2_utils.py @@ -0,0 +1,290 @@ +from typing import Tuple + +import torch +import torch.nn as nn +from torch.autograd import Function, Variable + +from . 
import pointnet2_batch_cuda as pointnet2 + + +class FarthestPointSampling(Function): + @staticmethod + def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor: + """ + Uses iterative farthest point sampling to select a set of npoint features that have the largest + minimum distance + :param ctx: + :param xyz: (B, N, 3) where N > npoint + :param npoint: int, number of features in the sampled set + :return: + output: (B, npoint) tensor containing the set + """ + assert xyz.is_contiguous() + + B, N, _ = xyz.size() + output = torch.cuda.IntTensor(B, npoint) + temp = torch.cuda.FloatTensor(B, N).fill_(1e10) + + pointnet2.farthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output) + return output + + @staticmethod + def backward(xyz, a=None): + return None, None + + +farthest_point_sample = furthest_point_sample = FarthestPointSampling.apply + + +class GatherOperation(Function): + + @staticmethod + def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: + """ + :param ctx: + :param features: (B, C, N) + :param idx: (B, npoint) index tensor of the features to gather + :return: + output: (B, C, npoint) + """ + assert features.is_contiguous() + assert idx.is_contiguous() + + B, npoint = idx.size() + _, C, N = features.size() + output = torch.cuda.FloatTensor(B, C, npoint) + + pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output) + + ctx.for_backwards = (idx, C, N) + return output + + @staticmethod + def backward(ctx, grad_out): + idx, C, N = ctx.for_backwards + B, npoint = idx.size() + + grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_()) + grad_out_data = grad_out.data.contiguous() + pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data) + return grad_features, None + + +gather_operation = GatherOperation.apply + + +class ThreeNN(Function): + + @staticmethod + def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Find the three nearest neighbors of unknown in known + :param ctx: + :param unknown: (B, N, 3) + :param known: (B, M, 3) + :return: + dist: (B, N, 3) l2 distance to the three nearest neighbors + idx: (B, N, 3) index of 3 nearest neighbors + """ + assert unknown.is_contiguous() + assert known.is_contiguous() + + B, N, _ = unknown.size() + m = known.size(1) + dist2 = torch.cuda.FloatTensor(B, N, 3) + idx = torch.cuda.IntTensor(B, N, 3) + + pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx) + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + + @staticmethod + def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: + """ + Performs weight linear interpolation on 3 features + :param ctx: + :param features: (B, C, M) Features descriptors to be interpolated from + :param idx: (B, n, 3) three nearest neighbors of the target features in features + :param weight: (B, n, 3) weights + :return: + output: (B, C, N) tensor of the interpolated features + """ + assert features.is_contiguous() + assert idx.is_contiguous() + assert weight.is_contiguous() + + B, c, m = features.size() + n = idx.size(1) + ctx.three_interpolate_for_backward = (idx, weight, m) + output = torch.cuda.FloatTensor(B, c, n) + + pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output) + return output + + @staticmethod + def backward(ctx, grad_out: torch.Tensor) -> 
Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + :param ctx: + :param grad_out: (B, C, N) tensor with gradients of outputs + :return: + grad_features: (B, C, M) tensor with gradients of features + None: + None: + """ + idx, weight, m = ctx.three_interpolate_for_backward + B, c, n = grad_out.size() + + grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_()) + grad_out_data = grad_out.data.contiguous() + + pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data) + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +class GroupingOperation(Function): + + @staticmethod + def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: + """ + :param ctx: + :param features: (B, C, N) tensor of features to group + :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with + :return: + output: (B, C, npoint, nsample) tensor + """ + assert features.is_contiguous() + assert idx.is_contiguous() + + B, nfeatures, nsample = idx.size() + _, C, N = features.size() + output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) + + pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output) + + ctx.for_backwards = (idx, N) + return output + + @staticmethod + def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + :param ctx: + :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward + :return: + grad_features: (B, C, N) gradient of the features + """ + idx, N = ctx.for_backwards + + B, C, npoint, nsample = grad_out.size() + grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_()) + + grad_out_data = grad_out.data.contiguous() + pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data) + return grad_features, None + + +grouping_operation = GroupingOperation.apply + + +class BallQuery(Function): + + @staticmethod + def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor: + """ + :param ctx: + :param radius: float, radius of the balls + :param nsample: int, maximum number of features in the balls + :param xyz: (B, N, 3) xyz coordinates of the features + :param new_xyz: (B, npoint, 3) centers of the ball query + :return: + idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls + """ + assert new_xyz.is_contiguous() + assert xyz.is_contiguous() + + B, N, _ = xyz.size() + npoint = new_xyz.size(1) + idx = torch.cuda.IntTensor(B, npoint, nsample).zero_() + + pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx) + return idx + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class QueryAndGroup(nn.Module): + def __init__(self, radius: float, nsample: int, use_xyz: bool = True): + """ + :param radius: float, radius of ball + :param nsample: int, maximum number of features to gather in the ball + :param use_xyz: + """ + super().__init__() + self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz + + def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]: + """ + :param xyz: (B, N, 3) xyz coordinates of the features + :param new_xyz: (B, npoint, 3) centroids + :param features: (B, C, N) descriptors of the features + :return: + new_features: (B, 3 + C, npoint, nsample) + """ + idx = 
ball_query(self.radius, self.nsample, xyz, new_xyz) + xyz_trans = xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample) + grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) + + if features is not None: + grouped_features = grouping_operation(features, idx) + if self.use_xyz: + new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample) + else: + new_features = grouped_features + else: + assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" + new_features = grouped_xyz + + return new_features + + +class GroupAll(nn.Module): + def __init__(self, use_xyz: bool = True): + super().__init__() + self.use_xyz = use_xyz + + def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None): + """ + :param xyz: (B, N, 3) xyz coordinates of the features + :param new_xyz: ignored + :param features: (B, C, N) descriptors of the features + :return: + new_features: (B, C + 3, 1, N) + """ + grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) + if features is not None: + grouped_features = features.unsqueeze(2) + if self.use_xyz: + new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, 3 + C, 1, N) + else: + new_features = grouped_features + else: + new_features = grouped_xyz + + return new_features diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp new file mode 100644 index 000000000..c0e2d8fea --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query.cpp @@ -0,0 +1,39 @@ +/* +batch version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + + +#include +#include +#include +#include +#include "ball_query_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) { + CHECK_INPUT(new_xyz_tensor); + CHECK_INPUT(xyz_tensor); + const float *new_xyz = new_xyz_tensor.data(); + const float *xyz = xyz_tensor.data(); + int *idx = idx_tensor.data(); + + ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx); + return 1; +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu new file mode 100644 index 000000000..38c006369 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.cu @@ -0,0 +1,73 @@ +/* +batch version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
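+
+Kernel notes: blockIdx.y indexes the batch while (blockIdx.x * blockDim.x + threadIdx.x)
+indexes the M query centers, so each thread scans all N points for one center. On the
+first in-radius hit every slot of idx is pre-filled with that index, so centers with
+fewer than nsample neighbors return duplicated (but valid) indices rather than
+uninitialized memory.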
+*/ + +#include +#include +#include + +#include "ball_query_gpu.h" +#include "cuda_utils.h" + + +__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample, + const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= m) return; + + new_xyz += bs_idx * m * 3 + pt_idx * 3; + xyz += bs_idx * n * 3; + idx += bs_idx * m * nsample + pt_idx * nsample; + + float radius2 = radius * radius; + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 < radius2){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } +} + + +void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \ + const float *new_xyz, const float *xyz, int *idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + + cudaError_t err; + + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + ball_query_kernel_fast<<>>(b, n, m, radius, nsample, new_xyz, xyz, idx); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h new file mode 100644 index 000000000..1213dda79 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/ball_query_gpu.h @@ -0,0 +1,15 @@ +#ifndef _BALL_QUERY_GPU_H +#define _BALL_QUERY_GPU_H + +#include +#include +#include +#include + +int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); + +void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, + const float *xyz, const float *new_xyz, int *idx); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h new file mode 100644 index 000000000..e2716b8e7 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/cuda_utils.h @@ -0,0 +1,16 @@ +#ifndef _CUDA_UTILS_H +#define _CUDA_UTILS_H + +#include +#include + +#define TOTAL_THREADS 1024 +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + + return std::max(std::min(1 << pow_2, TOTAL_THREADS), 1); +} +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp new file mode 100644 index 000000000..9735ae88d --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points.cpp @@ -0,0 +1,36 @@ +/* +batch version of point grouping, 
modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + + +#include +#include +#include +#include +#include "group_points_gpu.h" + + +int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { + + float *grad_points = grad_points_tensor.data(); + const int *idx = idx_tensor.data(); + const float *grad_out = grad_out_tensor.data(); + + group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points); + return 1; +} + + +int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) { + + const float *points = points_tensor.data(); + const int *idx = idx_tensor.data(); + float *out = out_tensor.data(); + + group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out); + return 1; +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.cu new file mode 100644 index 000000000..d9038f69d --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.cu @@ -0,0 +1,92 @@ +/* +batch version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + +#include +#include + +#include "cuda_utils.h" +#include "group_points_gpu.h" + + +__global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample, + const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { + // grad_out: (B, C, npoints, nsample) + // idx: (B, npoints, nsample) + // output: + // grad_points: (B, C, N) + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int pt_idx = index / nsample; + if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; + + int sample_idx = index % nsample; + grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; + idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; + + atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]); +} + +void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *grad_out, const int *idx, float *grad_points) { + // grad_out: (B, C, npoints, nsample) + // idx: (B, npoints, nsample) + // output: + // grad_points: (B, C, N) + cudaError_t err; + dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_grad_kernel_fast<<>>(b, c, n, npoints, nsample, grad_out, idx, grad_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample, + const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { + // points: (B, C, N) + // idx: (B, npoints, nsample) + // output: + // out: (B, C, npoints, nsample) + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int pt_idx = index / nsample; + if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) 
return; + + int sample_idx = index % nsample; + + idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; + int in_idx = bs_idx * c * n + c_idx * n + idx[0]; + int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; + + out[out_idx] = points[in_idx]; +} + + +void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, float *out) { + // points: (B, C, N) + // idx: (B, npoints, nsample) + // output: + // out: (B, C, npoints, nsample) + cudaError_t err; + dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_kernel_fast<<>>(b, c, n, npoints, nsample, points, idx, out); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h new file mode 100644 index 000000000..8a17c68bd --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/group_points_gpu.h @@ -0,0 +1,22 @@ +#ifndef _GROUP_POINTS_GPU_H +#define _GROUP_POINTS_GPU_H + +#include +#include +#include +#include + + +int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); + +void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, float *out); + +int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); + +void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *grad_out, const int *idx, float *grad_points); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp new file mode 100644 index 000000000..1c18e277d --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate.cpp @@ -0,0 +1,56 @@ +/* +batch version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
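+
+These host wrappers only unpack raw float/int pointers from the input tensors and
+forward them to the CUDA launchers declared in interpolate_gpu.h; no CHECK_INPUT
+validation is done here, so the Python callers (the ThreeNN / ThreeInterpolate
+autograd Functions) assert contiguity before invoking them.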
+*/ + + +#include +#include +#include +#include +#include +#include +#include +#include "interpolate_gpu.h" + + +void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, + at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) { + const float *unknown = unknown_tensor.data(); + const float *known = known_tensor.data(); + float *dist2 = dist2_tensor.data(); + int *idx = idx_tensor.data(); + + three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx); +} + + +void three_interpolate_wrapper_fast(int b, int c, int m, int n, + at::Tensor points_tensor, + at::Tensor idx_tensor, + at::Tensor weight_tensor, + at::Tensor out_tensor) { + + const float *points = points_tensor.data(); + const float *weight = weight_tensor.data(); + float *out = out_tensor.data(); + const int *idx = idx_tensor.data(); + + three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out); +} + + +void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, + at::Tensor grad_out_tensor, + at::Tensor idx_tensor, + at::Tensor weight_tensor, + at::Tensor grad_points_tensor) { + + const float *grad_out = grad_out_tensor.data(); + const float *weight = weight_tensor.data(); + float *grad_points = grad_points_tensor.data(); + const int *idx = idx_tensor.data(); + + three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points); +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.cu new file mode 100644 index 000000000..6922aeddc --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.cu @@ -0,0 +1,169 @@ +/* +batch version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
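+
+three_nn_kernel_fast gives each thread one unknown point and maintains the three
+smallest squared distances (best1 <= best2 <= best3) with a three-way insertion update
+while streaming over the M known points. Note the kernel writes squared distances into
+dist2; the Python wrapper applies torch.sqrt before returning them.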
+*/ + + +#include +#include +#include +#include + +#include "cuda_utils.h" +#include "interpolate_gpu.h" + + +__global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown, + const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) { + // unknown: (B, N, 3) + // known: (B, M, 3) + // output: + // dist2: (B, N, 3) + // idx: (B, N, 3) + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= n) return; + + unknown += bs_idx * n * 3 + pt_idx * 3; + known += bs_idx * m * 3; + dist2 += bs_idx * n * 3 + pt_idx * 3; + idx += bs_idx * n * 3 + pt_idx * 3; + + float ux = unknown[0]; + float uy = unknown[1]; + float uz = unknown[2]; + + float best1 = FLT_MAX, best2 = FLT_MAX, best3 = FLT_MAX; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < m; ++k) { + float x = known[k * 3 + 0]; + float y = known[k * 3 + 1]; + float z = known[k * 3 + 2]; + float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; besti3 = besti2; + best2 = best1; besti2 = besti1; + best1 = d; besti1 = k; + } + else if (d < best2) { + best3 = best2; besti3 = besti2; + best2 = d; besti2 = k; + } + else if (d < best3) { + best3 = d; besti3 = k; + } + } + dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; + idx[0] = besti1; idx[1] = besti2; idx[2] = besti3; +} + + +void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx) { + // unknown: (B, N, 3) + // known: (B, M, 3) + // output: + // dist2: (B, N, 3) + // idx: (B, N, 3) + + cudaError_t err; + dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + three_nn_kernel_fast<<>>(b, n, m, unknown, known, dist2, idx); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points, + const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) { + // points: (B, C, M) + // idx: (B, N, 3) + // weight: (B, N, 3) + // output: + // out: (B, C, N) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (bs_idx >= b || c_idx >= c || pt_idx >= n) return; + + weight += bs_idx * n * 3 + pt_idx * 3; + points += bs_idx * c * m + c_idx * m; + idx += bs_idx * n * 3 + pt_idx * 3; + out += bs_idx * c * n + c_idx * n; + + out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]]; +} + +void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, + const float *points, const int *idx, const float *weight, float *out) { + // points: (B, C, M) + // idx: (B, N, 3) + // weight: (B, N, 3) + // output: + // out: (B, C, N) + + cudaError_t err; + dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_kernel_fast<<>>(b, c, m, n, points, idx, weight, out); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, + const int *__restrict__ idx, const float *__restrict__ weight, float 
*__restrict__ grad_points) { + // grad_out: (B, C, N) + // weight: (B, N, 3) + // output: + // grad_points: (B, C, M) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (bs_idx >= b || c_idx >= c || pt_idx >= n) return; + + grad_out += bs_idx * c * n + c_idx * n + pt_idx; + weight += bs_idx * n * 3 + pt_idx * 3; + grad_points += bs_idx * c * m + c_idx * m; + idx += bs_idx * n * 3 + pt_idx * 3; + + + atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]); + atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]); + atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]); +} + +void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, + const int *idx, const float *weight, float *grad_points) { + // grad_out: (B, C, N) + // weight: (B, N, 3) + // output: + // grad_points: (B, C, M) + + cudaError_t err; + dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_grad_kernel_fast<<>>(b, c, n, m, grad_out, idx, weight, grad_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h new file mode 100644 index 000000000..95ea1d799 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/interpolate_gpu.h @@ -0,0 +1,30 @@ +#ifndef _INTERPOLATE_GPU_H +#define _INTERPOLATE_GPU_H + +#include +#include +#include +#include + + +void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, + at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); + +void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx); + + +void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); + +void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, + const float *points, const int *idx, const float *weight, float *out); + + +void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor); + +void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, + const int *idx, const float *weight, float *grad_points); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp new file mode 100644 index 000000000..284365061 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/pointnet2_api.cpp @@ -0,0 +1,24 @@ +#include +#include + +#include "ball_query_gpu.h" +#include "group_points_gpu.h" +#include "sampling_gpu.h" +#include "interpolate_gpu.h" + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast"); + + m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast"); + m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast"); + + m.def("gather_points_wrapper", &gather_points_wrapper_fast, 
"gather_points_wrapper_fast"); + m.def("gather_points_grad_wrapper", &gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast"); + + m.def("farthest_point_sampling_wrapper", &farthest_point_sampling_wrapper, "farthest_point_sampling_wrapper"); + + m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast"); + m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast"); + m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast"); +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp new file mode 100644 index 000000000..b00143036 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling.cpp @@ -0,0 +1,46 @@ +/* +batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + + +#include +#include +#include +#include "sampling_gpu.h" + + +int gather_points_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){ + const float *points = points_tensor.data(); + const int *idx = idx_tensor.data(); + float *out = out_tensor.data(); + + gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out); + return 1; +} + + +int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { + + const float *grad_out = grad_out_tensor.data(); + const int *idx = idx_tensor.data(); + float *grad_points = grad_points_tensor.data(); + + gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points); + return 1; +} + + +int farthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { + + const float *points = points_tensor.data(); + float *temp = temp_tensor.data(); + int *idx = idx_tensor.data(); + + farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); + return 1; +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.cu new file mode 100644 index 000000000..7aceca00e --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.cu @@ -0,0 +1,260 @@ +/* +batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
+*/ + + +#include +#include + +#include "cuda_utils.h" +#include "sampling_gpu.h" + + +__global__ void gather_points_kernel_fast(int b, int c, int n, int m, + const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { + // points: (B, C, N) + // idx: (B, M) + // output: + // out: (B, C, M) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; + + out += bs_idx * c * m + c_idx * m + pt_idx; + idx += bs_idx * m + pt_idx; + points += bs_idx * c * n + c_idx * n; + out[0] = points[idx[0]]; +} + +void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *points, const int *idx, float *out) { + // points: (B, C, N) + // idx: (B, npoints) + // output: + // out: (B, C, npoints) + + cudaError_t err; + dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + gather_points_kernel_fast<<>>(b, c, n, npoints, points, idx, out); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + +__global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, + const int *__restrict__ idx, float *__restrict__ grad_points) { + // grad_out: (B, C, M) + // idx: (B, M) + // output: + // grad_points: (B, C, N) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; + + grad_out += bs_idx * c * m + c_idx * m + pt_idx; + idx += bs_idx * m + pt_idx; + grad_points += bs_idx * c * n + c_idx * n; + + atomicAdd(grad_points + idx[0], grad_out[0]); +} + +void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, float *grad_points) { + // grad_out: (B, C, npoints) + // idx: (B, npoints) + // output: + // grad_points: (B, C, N) + + cudaError_t err; + dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + gather_points_grad_kernel_fast<<>>(b, c, n, npoints, grad_out, idx, grad_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? 
i2 : i1; +} + +template +__global__ void farthest_point_sampling_kernel(int b, int n, int m, + const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) + idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + // if (mag <= 1e-3) + // continue; + + float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 1024) { + if (tid < 512) { + __update(dists, dists_i, tid, tid + 512); + } + __syncthreads(); + } + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) + idxs[j] = old; + } +} + +void farthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + cudaError_t err; + unsigned int n_threads = opt_n_threads(n); + + switch (n_threads) { + case 1024: + farthest_point_sampling_kernel<1024><<>>(b, n, m, dataset, temp, idxs); break; + case 512: + farthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); break; + case 256: + farthest_point_sampling_kernel<256><<>>(b, n, m, dataset, temp, idxs); break; + case 128: + farthest_point_sampling_kernel<128><<>>(b, n, m, dataset, temp, idxs); break; + case 64: + farthest_point_sampling_kernel<64><<>>(b, n, m, dataset, temp, idxs); break; + case 32: + farthest_point_sampling_kernel<32><<>>(b, n, m, dataset, temp, idxs); break; + case 16: + farthest_point_sampling_kernel<16><<>>(b, n, m, dataset, temp, idxs); break; + case 8: + farthest_point_sampling_kernel<8><<>>(b, n, m, dataset, temp, idxs); break; + case 4: + farthest_point_sampling_kernel<4><<>>(b, n, 
m, dataset, temp, idxs); break; + case 2: + farthest_point_sampling_kernel<2><<>>(b, n, m, dataset, temp, idxs); break; + case 1: + farthest_point_sampling_kernel<1><<>>(b, n, m, dataset, temp, idxs); break; + default: + farthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); + } + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h new file mode 100644 index 000000000..dc29476b1 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_batch/src/sampling_gpu.h @@ -0,0 +1,29 @@ +#ifndef _SAMPLING_GPU_H +#define _SAMPLING_GPU_H + +#include +#include +#include + + +int gather_points_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); + +void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *points, const int *idx, float *out); + + +int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); + +void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, float *grad_points); + + +int farthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); + +void farthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/__init__.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py new file mode 100644 index 000000000..0210ab296 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_modules.py @@ -0,0 +1,470 @@ +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from . 
import pointnet2_utils + + +def build_local_aggregation_module(input_channels, config): + local_aggregation_name = config.get('NAME', 'StackSAModuleMSG') + + if local_aggregation_name == 'StackSAModuleMSG': + mlps = config.MLPS + for k in range(len(mlps)): + mlps[k] = [input_channels] + mlps[k] + cur_layer = StackSAModuleMSG( + radii=config.POOL_RADIUS, nsamples=config.NSAMPLE, mlps=mlps, use_xyz=True, pool_method='max_pool', + ) + num_c_out = sum([x[-1] for x in mlps]) + elif local_aggregation_name == 'VectorPoolAggregationModuleMSG': + cur_layer = VectorPoolAggregationModuleMSG(input_channels=input_channels, config=config) + num_c_out = config.MSG_POST_MLPS[-1] + else: + raise NotImplementedError + + return cur_layer, num_c_out + + +class StackSAModuleMSG(nn.Module): + + def __init__(self, *, radii: List[float], nsamples: List[int], mlps: List[List[int]], + use_xyz: bool = True, pool_method='max_pool'): + """ + Args: + radii: list of float, list of radii to group with + nsamples: list of int, number of samples in each ball query + mlps: list of list of int, spec of the pointnet before the global pooling for each scale + use_xyz: + pool_method: max_pool / avg_pool + """ + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append(pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + shared_mlps = [] + for k in range(len(mlp_spec) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp_spec[k], mlp_spec[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp_spec[k + 1]), + nn.ReLU() + ]) + self.mlps.append(nn.Sequential(*shared_mlps)) + self.pool_method = pool_method + + self.init_weights() + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + if isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1.0) + nn.init.constant_(m.bias, 0) + + def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features=None, empty_voxel_set_zeros=True): + """ + :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features + :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + :param new_xyz: (M1 + M2 ..., 3) + :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features + :return: + new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz + new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + new_features_list = [] + for k in range(len(self.groupers)): + new_features, ball_idxs = self.groupers[k]( + xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features + ) # (M1 + M2, C, nsample) + new_features = new_features.permute(1, 0, 2).unsqueeze(dim=0) # (1, C, M1 + M2 ..., nsample) + new_features = self.mlps[k](new_features) # (1, C, M1 + M2 ..., nsample) + + if self.pool_method == 'max_pool': + new_features = F.max_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ).squeeze(dim=-1) # (1, C, M1 + M2 ...) + elif self.pool_method == 'avg_pool': + new_features = F.avg_pool2d( + new_features, kernel_size=[1, new_features.size(3)] + ).squeeze(dim=-1) # (1, C, M1 + M2 ...) 
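+ # Both pooling branches collapse the nsample axis, so downstream layers see a fixed (1, C, M1 + M2 ...) tensor regardless of how many neighbors fell inside each ball.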
+ else: + raise NotImplementedError + new_features = new_features.squeeze(dim=0).permute(1, 0) # (M1 + M2 ..., C) + new_features_list.append(new_features) + + new_features = torch.cat(new_features_list, dim=1) # (M1 + M2 ..., C) + + return new_xyz, new_features + + +class StackPointnetFPModule(nn.Module): + def __init__(self, *, mlp: List[int]): + """ + Args: + mlp: list of int + """ + super().__init__() + shared_mlps = [] + for k in range(len(mlp) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp[k + 1]), + nn.ReLU() + ]) + self.mlp = nn.Sequential(*shared_mlps) + + def forward(self, unknown, unknown_batch_cnt, known, known_batch_cnt, unknown_feats=None, known_feats=None): + """ + Args: + unknown: (N1 + N2 ..., 3) + known: (M1 + M2 ..., 3) + unknow_feats: (N1 + N2 ..., C1) + known_feats: (M1 + M2 ..., C2) + + Returns: + new_features: (N1 + N2 ..., C_out) + """ + dist, idx = pointnet2_utils.three_nn(unknown, unknown_batch_cnt, known, known_batch_cnt) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=-1, keepdim=True) + weight = dist_recip / norm + + interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight) + + if unknown_feats is not None: + new_features = torch.cat([interpolated_feats, unknown_feats], dim=1) # (N1 + N2 ..., C2 + C1) + else: + new_features = interpolated_feats + new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1) + new_features = self.mlp(new_features) + + new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C) + return new_features + + +class VectorPoolLocalInterpolateModule(nn.Module): + def __init__(self, mlp, num_voxels, max_neighbour_distance, nsample, neighbor_type, use_xyz=True, + neighbour_distance_multiplier=1.0, xyz_encoding_type='concat'): + """ + Args: + mlp: + num_voxels: + max_neighbour_distance: + neighbor_type: 1: ball, others: cube + nsample: find all (-1), find limited number(>0) + use_xyz: + neighbour_distance_multiplier: + xyz_encoding_type: + """ + super().__init__() + self.num_voxels = num_voxels # [num_grid_x, num_grid_y, num_grid_z]: number of grids in each local area centered at new_xyz + self.num_total_grids = self.num_voxels[0] * self.num_voxels[1] * self.num_voxels[2] + self.max_neighbour_distance = max_neighbour_distance + self.neighbor_distance_multiplier = neighbour_distance_multiplier + self.nsample = nsample + self.neighbor_type = neighbor_type + self.use_xyz = use_xyz + self.xyz_encoding_type = xyz_encoding_type + + if mlp is not None: + if self.use_xyz: + mlp[0] += 9 if self.xyz_encoding_type == 'concat' else 0 + shared_mlps = [] + for k in range(len(mlp) - 1): + shared_mlps.extend([ + nn.Conv2d(mlp[k], mlp[k + 1], kernel_size=1, bias=False), + nn.BatchNorm2d(mlp[k + 1]), + nn.ReLU() + ]) + self.mlp = nn.Sequential(*shared_mlps) + else: + self.mlp = None + + self.num_avg_length_of_neighbor_idxs = 1000 + + def forward(self, support_xyz, support_features, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt): + """ + Args: + support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features + support_features: (N1 + N2 ..., C) point-wise features + xyz_batch_cnt: (batch_size), [N1, N2, ...] + new_xyz: (M1 + M2 ..., 3) centers of the ball query + new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid + new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
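+
+ Shape walk-through with made-up sizes: for num_total_grids = 27 (a 3x3x3 grid) and M1 + M2 = 100 new centers, new_xyz_grid_centers is (100, 27, 3), and the interpolated features below are flattened to 100 * 27 rows before the optional MLP.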
+ Returns: + new_features: (N1 + N2 ..., C_out) + """ + with torch.no_grad(): + dist, idx, num_avg_length_of_neighbor_idxs = pointnet2_utils.three_nn_for_vector_pool_by_two_step( + support_xyz, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt, + self.max_neighbour_distance, self.nsample, self.neighbor_type, + self.num_avg_length_of_neighbor_idxs, self.num_total_grids, self.neighbor_distance_multiplier + ) + self.num_avg_length_of_neighbor_idxs = max(self.num_avg_length_of_neighbor_idxs, num_avg_length_of_neighbor_idxs.item()) + + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=-1, keepdim=True) + weight = dist_recip / torch.clamp_min(norm, min=1e-8) + + empty_mask = (idx.view(-1, 3)[:, 0] == -1) + idx.view(-1, 3)[empty_mask] = 0 + + interpolated_feats = pointnet2_utils.three_interpolate(support_features, idx.view(-1, 3), weight.view(-1, 3)) + interpolated_feats = interpolated_feats.view(idx.shape[0], idx.shape[1], -1) # (M1 + M2 ..., num_total_grids, C) + if self.use_xyz: + near_known_xyz = support_xyz[idx.view(-1, 3).long()].view(-1, 3, 3) # ( (M1 + M2 ...)*num_total_grids, 3) + local_xyz = (new_xyz_grid_centers.view(-1, 1, 3) - near_known_xyz).view(-1, idx.shape[1], 9) + if self.xyz_encoding_type == 'concat': + interpolated_feats = torch.cat((interpolated_feats, local_xyz), dim=-1) # ( M1 + M2 ..., num_total_grids, 9+C) + else: + raise NotImplementedError + + new_features = interpolated_feats.view(-1, interpolated_feats.shape[-1]) # ((M1 + M2 ...) * num_total_grids, C) + new_features[empty_mask, :] = 0 + if self.mlp is not None: + new_features = new_features.permute(1, 0)[None, :, :, None] # (1, C, N1 + N2 ..., 1) + new_features = self.mlp(new_features) + + new_features = new_features.squeeze(dim=0).squeeze(dim=-1).permute(1, 0) # (N1 + N2 ..., C) + return new_features + + +class VectorPoolAggregationModule(nn.Module): + def __init__( + self, input_channels, num_local_voxel=(3, 3, 3), local_aggregation_type='local_interpolation', + num_reduced_channels=30, num_channels_of_local_aggregation=32, post_mlps=(128,), + max_neighbor_distance=None, neighbor_nsample=-1, neighbor_type=0, neighbor_distance_multiplier=2.0): + super().__init__() + self.num_local_voxel = num_local_voxel + self.total_voxels = self.num_local_voxel[0] * self.num_local_voxel[1] * self.num_local_voxel[2] + self.local_aggregation_type = local_aggregation_type + assert self.local_aggregation_type in ['local_interpolation', 'voxel_avg_pool', 'voxel_random_choice'] + self.input_channels = input_channels + self.num_reduced_channels = input_channels if num_reduced_channels is None else num_reduced_channels + self.num_channels_of_local_aggregation = num_channels_of_local_aggregation + self.max_neighbour_distance = max_neighbor_distance + self.neighbor_nsample = neighbor_nsample + self.neighbor_type = neighbor_type # 1: ball, others: cube + + if self.local_aggregation_type == 'local_interpolation': + self.local_interpolate_module = VectorPoolLocalInterpolateModule( + mlp=None, num_voxels=self.num_local_voxel, + max_neighbour_distance=self.max_neighbour_distance, + nsample=self.neighbor_nsample, + neighbor_type=self.neighbor_type, + neighbour_distance_multiplier=neighbor_distance_multiplier, + ) + num_c_in = (self.num_reduced_channels + 9) * self.total_voxels + else: + self.local_interpolate_module = None + num_c_in = (self.num_reduced_channels + 3) * self.total_voxels + + num_c_out = self.total_voxels * self.num_channels_of_local_aggregation + + self.separate_local_aggregation_layer = 
nn.Sequential( + nn.Conv1d(num_c_in, num_c_out, kernel_size=1, groups=self.total_voxels, bias=False), + nn.BatchNorm1d(num_c_out), + nn.ReLU() + ) + + post_mlp_list = [] + c_in = num_c_out + for cur_num_c in post_mlps: + post_mlp_list.extend([ + nn.Conv1d(c_in, cur_num_c, kernel_size=1, bias=False), + nn.BatchNorm1d(cur_num_c), + nn.ReLU() + ]) + c_in = cur_num_c + self.post_mlps = nn.Sequential(*post_mlp_list) + + self.num_mean_points_per_grid = 20 + self.init_weights() + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d): + nn.init.kaiming_normal_(m.weight) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): + nn.init.constant_(m.weight, 1.0) + nn.init.constant_(m.bias, 0) + + def extra_repr(self) -> str: + ret = f'radius={self.max_neighbour_distance}, local_voxels=({self.num_local_voxel}, ' \ + f'local_aggregation_type={self.local_aggregation_type}, ' \ + f'num_c_reduction={self.input_channels}->{self.num_reduced_channels}, ' \ + f'num_c_local_aggregation={self.num_channels_of_local_aggregation}' + return ret + + def vector_pool_with_voxel_query(self, xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt): + use_xyz = 1 + pooling_type = 0 if self.local_aggregation_type == 'voxel_avg_pool' else 1 + + new_features, new_local_xyz, num_mean_points_per_grid, point_cnt_of_grid = pointnet2_utils.vector_pool_with_voxel_query_op( + xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt, + self.num_local_voxel[0], self.num_local_voxel[1], self.num_local_voxel[2], + self.max_neighbour_distance, self.num_reduced_channels, use_xyz, + self.num_mean_points_per_grid, self.neighbor_nsample, self.neighbor_type, + pooling_type + ) + self.num_mean_points_per_grid = max(self.num_mean_points_per_grid, num_mean_points_per_grid.item()) + + num_new_pts = new_features.shape[0] + new_local_xyz = new_local_xyz.view(num_new_pts, -1, 3) # (N, num_voxel, 3) + new_features = new_features.view(num_new_pts, -1, self.num_reduced_channels) # (N, num_voxel, C) + new_features = torch.cat((new_local_xyz, new_features), dim=-1).view(num_new_pts, -1) + + return new_features, point_cnt_of_grid + + @staticmethod + def get_dense_voxels_by_center(point_centers, max_neighbour_distance, num_voxels): + """ + Args: + point_centers: (N, 3) + max_neighbour_distance: float + num_voxels: [num_x, num_y, num_z] + + Returns: + voxel_centers: (N, total_voxels, 3) + """ + R = max_neighbour_distance + device = point_centers.device + x_grids = torch.arange(-R + R / num_voxels[0], R - R / num_voxels[0] + 1e-5, 2 * R / num_voxels[0], device=device) + y_grids = torch.arange(-R + R / num_voxels[1], R - R / num_voxels[1] + 1e-5, 2 * R / num_voxels[1], device=device) + z_grids = torch.arange(-R + R / num_voxels[2], R - R / num_voxels[2] + 1e-5, 2 * R / num_voxels[2], device=device) + x_offset, y_offset, z_offset = torch.meshgrid(x_grids, y_grids, z_grids) # shape: [num_x, num_y, num_z] + xyz_offset = torch.cat(( + x_offset.contiguous().view(-1, 1), + y_offset.contiguous().view(-1, 1), + z_offset.contiguous().view(-1, 1)), dim=-1 + ) + voxel_centers = point_centers[:, None, :] + xyz_offset[None, :, :] + return voxel_centers + + def vector_pool_with_local_interpolate(self, xyz, xyz_batch_cnt, features, new_xyz, new_xyz_batch_cnt): + """ + Args: + xyz: (N, 3) + xyz_batch_cnt: (batch_size) + features: (N, C) + new_xyz: (M, 3) + new_xyz_batch_cnt: (batch_size) + Returns: + new_features: (M, total_voxels * C) + """ + 
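+ # Flow: lay a dense grid of total_voxels centers around every new point, have the local interpolate module estimate a feature vector at each grid center, then flatten so each new point carries total_voxels * C channels.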
voxel_centers = self.get_dense_voxels_by_center( + point_centers=new_xyz, max_neighbour_distance=self.max_neighbour_distance, num_voxels=self.num_local_voxel + ) # (M1 + M2 + ..., total_voxels, 3) + voxel_features = self.local_interpolate_module.forward( + support_xyz=xyz, support_features=features, xyz_batch_cnt=xyz_batch_cnt, + new_xyz=new_xyz, new_xyz_grid_centers=voxel_centers, new_xyz_batch_cnt=new_xyz_batch_cnt + ) # ((M1 + M2 ...) * total_voxels, C) + + voxel_features = voxel_features.contiguous().view(-1, self.total_voxels * voxel_features.shape[-1]) + return voxel_features + + def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features, **kwargs): + """ + :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features + :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + :param new_xyz: (M1 + M2 ..., 3) + :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + :param features: (N1 + N2 ..., C) tensor of the descriptors of the the features + :return: + new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz + new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + N, C = features.shape + + assert C % self.num_reduced_channels == 0, \ + f'the input channels ({C}) should be an integral multiple of num_reduced_channels({self.num_reduced_channels})' + + features = features.view(N, -1, self.num_reduced_channels).sum(dim=1) + + if self.local_aggregation_type in ['voxel_avg_pool', 'voxel_random_choice']: + vector_features, point_cnt_of_grid = self.vector_pool_with_voxel_query( + xyz=xyz, xyz_batch_cnt=xyz_batch_cnt, features=features, + new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt + ) + elif self.local_aggregation_type == 'local_interpolation': + vector_features = self.vector_pool_with_local_interpolate( + xyz=xyz, xyz_batch_cnt=xyz_batch_cnt, features=features, + new_xyz=new_xyz, new_xyz_batch_cnt=new_xyz_batch_cnt + ) # (M1 + M2 + ..., total_voxels * C) + else: + raise NotImplementedError + + vector_features = vector_features.permute(1, 0)[None, :, :] # (1, num_voxels * C, M1 + M2 ...) 
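+
+ # The (1, num_voxels * C, M1 + M2 ...) layout feeds a kernel-size-1 Conv1d with groups=total_voxels, so each local voxel's channel slice is aggregated by its own filter bank before the shared post-MLPs mix them.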
+ + new_features = self.separate_local_aggregation_layer(vector_features) + + new_features = self.post_mlps(new_features) + new_features = new_features.squeeze(dim=0).permute(1, 0) + return new_xyz, new_features + + +class VectorPoolAggregationModuleMSG(nn.Module): + def __init__(self, input_channels, config): + super().__init__() + self.model_cfg = config + self.num_groups = self.model_cfg.NUM_GROUPS + + self.layers = [] + c_in = 0 + for k in range(self.num_groups): + cur_config = self.model_cfg[f'GROUP_CFG_{k}'] + cur_vector_pool_module = VectorPoolAggregationModule( + input_channels=input_channels, num_local_voxel=cur_config.NUM_LOCAL_VOXEL, + post_mlps=cur_config.POST_MLPS, + max_neighbor_distance=cur_config.MAX_NEIGHBOR_DISTANCE, + neighbor_nsample=cur_config.NEIGHBOR_NSAMPLE, + local_aggregation_type=self.model_cfg.LOCAL_AGGREGATION_TYPE, + num_reduced_channels=self.model_cfg.get('NUM_REDUCED_CHANNELS', None), + num_channels_of_local_aggregation=self.model_cfg.NUM_CHANNELS_OF_LOCAL_AGGREGATION, + neighbor_distance_multiplier=2.0 + ) + self.__setattr__(f'layer_{k}', cur_vector_pool_module) + c_in += cur_config.POST_MLPS[-1] + + c_in += 3 # use_xyz + + shared_mlps = [] + for cur_num_c in self.model_cfg.MSG_POST_MLPS: + shared_mlps.extend([ + nn.Conv1d(c_in, cur_num_c, kernel_size=1, bias=False), + nn.BatchNorm1d(cur_num_c), + nn.ReLU() + ]) + c_in = cur_num_c + self.msg_post_mlps = nn.Sequential(*shared_mlps) + + def forward(self, **kwargs): + features_list = [] + for k in range(self.num_groups): + cur_xyz, cur_features = self.__getattr__(f'layer_{k}')(**kwargs) + features_list.append(cur_features) + + features = torch.cat(features_list, dim=-1) + features = torch.cat((cur_xyz, features), dim=-1) + features = features.permute(1, 0)[None, :, :] # (1, C, N) + new_features = self.msg_post_mlps(features) + new_features = new_features.squeeze(dim=0).permute(1, 0) # (N, C) + + return cur_xyz, new_features diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py new file mode 100644 index 000000000..cd2c1f341 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/pointnet2_utils.py @@ -0,0 +1,457 @@ +import torch +import torch.nn as nn +from torch.autograd import Function, Variable + +from . import pointnet2_stack_cuda as pointnet2 + + +class BallQuery(Function): + + @staticmethod + def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor, + new_xyz: torch.Tensor, new_xyz_batch_cnt): + """ + Args: + ctx: + radius: float, radius of the balls + nsample: int, maximum number of features in the balls + xyz: (N1 + N2 ..., 3) xyz coordinates of the features + xyz_batch_cnt: (batch_size), [N1, N2, ...] + new_xyz: (M1 + M2 ..., 3) centers of the ball query + new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
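+
+ For illustration (made-up counts): xyz_batch_cnt = [2, 3] means rows 0-1 of xyz belong to the first sample and rows 2-4 to the second; new_xyz_batch_cnt partitions new_xyz the same way.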
+
+        Returns:
+            idx: (M1 + M2 ..., nsample) tensor with the indices of the features that form the query balls
+        """
+        assert new_xyz.is_contiguous()
+        assert new_xyz_batch_cnt.is_contiguous()
+        assert xyz.is_contiguous()
+        assert xyz_batch_cnt.is_contiguous()
+
+        B = xyz_batch_cnt.shape[0]
+        M = new_xyz.shape[0]
+        idx = torch.cuda.IntTensor(M, nsample).zero_()
+
+        pointnet2.ball_query_wrapper(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx)
+        empty_ball_mask = (idx[:, 0] == -1)
+        idx[empty_ball_mask] = 0
+
+        ctx.mark_non_differentiable(idx)
+        ctx.mark_non_differentiable(empty_ball_mask)
+
+        return idx, empty_ball_mask
+
+    @staticmethod
+    def backward(ctx, a=None, b=None):
+        return None, None, None, None, None, None
+
+
+ball_query = BallQuery.apply
+
+
+class GroupingOperation(Function):
+
+    @staticmethod
+    def forward(ctx, features: torch.Tensor, features_batch_cnt: torch.Tensor,
+                idx: torch.Tensor, idx_batch_cnt: torch.Tensor):
+        """
+        Args:
+            ctx:
+            features: (N1 + N2 ..., C) tensor of features to group
+            features_batch_cnt: (batch_size) [N1, N2, ...] number of stacked features per sample
+            idx: (M1 + M2 ..., nsample) tensor containing the indices of features to group with
+            idx_batch_cnt: (batch_size) [M1, M2, ...] number of stacked query points per sample
+
+        Returns:
+            output: (M1 + M2 ..., C, nsample) tensor
+        """
+        assert features.is_contiguous()
+        assert features_batch_cnt.is_contiguous()
+        assert idx.is_contiguous()
+        assert idx_batch_cnt.is_contiguous()
+
+        assert features.shape[0] == features_batch_cnt.sum(), \
+            'features: %s, features_batch_cnt: %s' % (str(features.shape), str(features_batch_cnt))
+        assert idx.shape[0] == idx_batch_cnt.sum(), \
+            'idx: %s, idx_batch_cnt: %s' % (str(idx.shape), str(idx_batch_cnt))
+
+        M, nsample = idx.size()
+        N, C = features.size()
+        B = idx_batch_cnt.shape[0]
+        output = torch.cuda.FloatTensor(M, C, nsample)
+
+        pointnet2.group_points_wrapper(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, output)
+
+        ctx.for_backwards = (B, N, idx, features_batch_cnt, idx_batch_cnt)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_out: torch.Tensor):
+        """
+        Args:
+            ctx:
+            grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward
+
+        Returns:
+            grad_features: (N1 + N2 ..., C) gradient of the features
+        """
+        B, N, idx, features_batch_cnt, idx_batch_cnt = ctx.for_backwards
+
+        M, C, nsample = grad_out.size()
+        grad_features = Variable(torch.cuda.FloatTensor(N, C).zero_())
+
+        grad_out_data = grad_out.data.contiguous()
+        pointnet2.group_points_grad_wrapper(B, M, C, N, nsample, grad_out_data, idx,
+                                            idx_batch_cnt, features_batch_cnt, grad_features.data)
+        return grad_features, None, None, None
+
+
+grouping_operation = GroupingOperation.apply
+
+
+class QueryAndGroup(nn.Module):
+    def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
+        """
+        Args:
+            radius: float, radius of ball
+            nsample: int, maximum number of features to gather in the ball
+            use_xyz: bool, whether to concatenate the local xyz offsets to the grouped features
+        """
+        super().__init__()
+        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
+
+    def forward(self, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
+                new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
+                features: torch.Tensor = None):
+        """
+        Args:
+            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+            xyz_batch_cnt: (batch_size), [N1, N2, ...]
+            new_xyz: (M1 + M2 ..., 3) centers of the ball query
+            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
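Reviewer note: for sanity-checking small inputs, a single-sample CPU reference with the same padding and empty-ball semantics as the CUDA ball query above (this is a sketch with my own names, not the kernel path; it also skips the stacked-batch offsets):

```python
import torch

def ball_query_cpu(radius, nsample, xyz, new_xyz):
    """CPU reference for one sample: mirrors the kernel's pad-with-first-hit behavior."""
    dist = torch.cdist(new_xyz, xyz)                  # (M, N) pairwise distances
    idx = torch.zeros(new_xyz.shape[0], nsample, dtype=torch.int64)
    empty_ball_mask = torch.zeros(new_xyz.shape[0], dtype=torch.bool)
    for i, row in enumerate(dist):
        hits = torch.nonzero(row < radius).flatten()[:nsample]
        if hits.numel() == 0:
            empty_ball_mask[i] = True                 # kernel writes -1, the wrapper maps it to 0
        else:
            idx[i] = hits[0]                          # pad every slot with the first hit ...
            idx[i, :hits.numel()] = hits              # ... then overwrite with the real hits
    return idx, empty_ball_mask
```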
+            features: (N1 + N2 ..., C) tensor of features to group
+
+        Returns:
+            new_features: (M1 + M2 ..., C, nsample) tensor
+        """
+        assert xyz.shape[0] == xyz_batch_cnt.sum(), \
+            'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
+        assert new_xyz.shape[0] == new_xyz_batch_cnt.sum(), \
+            'new_xyz: %s, new_xyz_batch_cnt: %s' % (str(new_xyz.shape), str(new_xyz_batch_cnt))
+
+        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
+        idx, empty_ball_mask = ball_query(self.radius, self.nsample, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt)
+        grouped_xyz = grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2 ..., 3, nsample)
+        grouped_xyz -= new_xyz.unsqueeze(-1)
+
+        grouped_xyz[empty_ball_mask] = 0
+
+        if features is not None:
+            grouped_features = grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt)  # (M1 + M2 ..., C, nsample)
+            grouped_features[empty_ball_mask] = 0
+            if self.use_xyz:
+                new_features = torch.cat([grouped_xyz, grouped_features], dim=1)  # (M1 + M2 ..., C + 3, nsample)
+            else:
+                new_features = grouped_features
+        else:
+            assert self.use_xyz, "use_xyz must be True when no features are provided!"
+            new_features = grouped_xyz
+
+        return new_features, idx
+
+
+class FarthestPointSampling(Function):
+    @staticmethod
+    def forward(ctx, xyz: torch.Tensor, npoint: int):
+        """
+        Args:
+            ctx:
+            xyz: (B, N, 3) where N > npoint
+            npoint: int, number of features in the sampled set
+
+        Returns:
+            output: (B, npoint) tensor containing the indices of the sampled set
+        """
+        assert xyz.is_contiguous()
+
+        B, N, _ = xyz.size()
+        output = torch.cuda.IntTensor(B, npoint)
+        temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
+
+        pointnet2.farthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_out=None):
+        # sampling is non-differentiable: one None per forward input (xyz, npoint)
+        return None, None
+
+
+farthest_point_sample = furthest_point_sample = FarthestPointSampling.apply
+
+
+class StackFarthestPointSampling(Function):
+    @staticmethod
+    def forward(ctx, xyz, xyz_batch_cnt, npoint):
+        """
+        Args:
+            ctx:
+            xyz: (N1 + N2 + ..., 3) where N > npoint
+            xyz_batch_cnt: [N1, N2, ...]
+            npoint: int, number of features in the sampled set
+
+        Returns:
+            output: (npoint.sum(),) tensor containing the indices of the sampled set,
+                stacked per sample in the order (M1, M2, ...)
+        """
+        assert xyz.is_contiguous() and xyz.shape[1] == 3
+
+        batch_size = len(xyz_batch_cnt)
+        if not isinstance(npoint, torch.Tensor):
+            if not isinstance(npoint, list):
+                npoint = [npoint for i in range(batch_size)]
+            npoint = torch.tensor(npoint, device=xyz.device).int()
+
+        N, _ = xyz.size()
+        temp = torch.cuda.FloatTensor(N).fill_(1e10)
+        output = torch.cuda.IntTensor(npoint.sum().item())
+
+        pointnet2.stack_farthest_point_sampling_wrapper(xyz, temp, xyz_batch_cnt, output, npoint)
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_out=None):
+        # sampling is non-differentiable: one None per forward input (xyz, xyz_batch_cnt, npoint)
+        return None, None, None
+
+
+stack_farthest_point_sample = StackFarthestPointSampling.apply
+
+
+class ThreeNN(Function):
+    @staticmethod
+    def forward(ctx, unknown, unknown_batch_cnt, known, known_batch_cnt):
+        """
+        Args:
+            ctx:
+            unknown: (N1 + N2..., 3)
+            unknown_batch_cnt: (batch_size), [N1, N2, ...]
+            known: (M1 + M2..., 3)
+            known_batch_cnt: (batch_size), [M1, M2, ...]
+
+        Returns:
+            dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors
+            idx: (N1 + N2 ..., 3) index of the three nearest neighbors, range [0, M1+M2+...]
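Reviewer note: a CPU sketch of the `three_nn` semantics for a single sample, useful for testing; `torch.cdist`/`topk` stand in for the kernel's running top-3 scan, and the stacked-batch offset handling is deliberately omitted:

```python
import torch

def three_nn_cpu(unknown, known):
    """Distances and indices of the 3 nearest `known` points for each `unknown` point."""
    d2 = torch.cdist(unknown, known).pow(2)      # squared L2, as in the kernel
    dist2, idx = torch.topk(d2, k=3, dim=1, largest=False)
    return torch.sqrt(dist2), idx                # the wrapper also returns sqrt'd distances

unknown, known = torch.rand(6, 3), torch.rand(10, 3)
dist, idx = three_nn_cpu(unknown, known)
assert dist.shape == (6, 3) and idx.shape == (6, 3)
```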
+ """ + assert unknown.shape.__len__() == 2 and unknown.shape[1] == 3 + assert known.shape.__len__() == 2 and known.shape[1] == 3 + assert unknown_batch_cnt.__len__() == known_batch_cnt.__len__() + + dist2 = unknown.new_zeros(unknown.shape) + idx = unknown_batch_cnt.new_zeros(unknown.shape).int() + + pointnet2.three_nn_wrapper( + unknown.contiguous(), unknown_batch_cnt.contiguous(), + known.contiguous(), known_batch_cnt.contiguous(), dist2, idx + ) + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + + @staticmethod + def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor): + """ + Args: + ctx: + features: (M1 + M2 ..., C) + idx: [N1 + N2 ..., 3] + weight: [N1 + N2 ..., 3] + + Returns: + out_tensor: (N1 + N2 ..., C) + """ + assert idx.shape[0] == weight.shape[0] and idx.shape[1] == weight.shape[1] == 3 + + ctx.three_interpolate_for_backward = (idx, weight, features.shape[0]) + output = features.new_zeros((idx.shape[0], features.shape[1])) + pointnet2.three_interpolate_wrapper(features.contiguous(), idx.contiguous(), weight.contiguous(), output) + return output + + @staticmethod + def backward(ctx, grad_out: torch.Tensor): + """ + Args: + ctx: + grad_out: (N1 + N2 ..., C) + + Returns: + grad_features: (M1 + M2 ..., C) + """ + idx, weight, M = ctx.three_interpolate_for_backward + grad_features = grad_out.new_zeros((M, grad_out.shape[1])) + pointnet2.three_interpolate_grad_wrapper( + grad_out.contiguous(), idx.contiguous(), weight.contiguous(), grad_features + ) + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +class ThreeNNForVectorPoolByTwoStep(Function): + @staticmethod + def forward(ctx, support_xyz, xyz_batch_cnt, new_xyz, new_xyz_grid_centers, new_xyz_batch_cnt, + max_neighbour_distance, nsample, neighbor_type, avg_length_of_neighbor_idxs, num_total_grids, + neighbor_distance_multiplier): + """ + Args: + ctx: + // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features + // xyz_batch_cnt: (batch_size), [N1, N2, ...] + // new_xyz: (M1 + M2 ..., 3) centers of the ball query + // new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid + // new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
+ // nsample: find all (-1), find limited number(>0) + // neighbor_type: 1: ball, others: cube + // neighbor_distance_multiplier: query_distance = neighbor_distance_multiplier * max_neighbour_distance + + Returns: + // new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn + // new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn + """ + num_new_xyz = new_xyz.shape[0] + new_xyz_grid_dist2 = new_xyz_grid_centers.new_zeros(new_xyz_grid_centers.shape) + new_xyz_grid_idxs = new_xyz_grid_centers.new_zeros(new_xyz_grid_centers.shape).int().fill_(-1) + + while True: + num_max_sum_points = avg_length_of_neighbor_idxs * num_new_xyz + stack_neighbor_idxs = new_xyz_grid_idxs.new_zeros(num_max_sum_points) + start_len = new_xyz_grid_idxs.new_zeros(num_new_xyz, 2).int() + cumsum = new_xyz_grid_idxs.new_zeros(1) + + pointnet2.query_stacked_local_neighbor_idxs_wrapper_stack( + support_xyz.contiguous(), xyz_batch_cnt.contiguous(), + new_xyz.contiguous(), new_xyz_batch_cnt.contiguous(), + stack_neighbor_idxs.contiguous(), start_len.contiguous(), cumsum, + avg_length_of_neighbor_idxs, max_neighbour_distance * neighbor_distance_multiplier, + nsample, neighbor_type + ) + avg_length_of_neighbor_idxs = cumsum[0].item() // num_new_xyz + int(cumsum[0].item() % num_new_xyz > 0) + + if cumsum[0] <= num_max_sum_points: + break + + stack_neighbor_idxs = stack_neighbor_idxs[:cumsum[0]] + pointnet2.query_three_nn_by_stacked_local_idxs_wrapper_stack( + support_xyz, new_xyz, new_xyz_grid_centers, new_xyz_grid_idxs, new_xyz_grid_dist2, + stack_neighbor_idxs, start_len, num_new_xyz, num_total_grids + ) + + return torch.sqrt(new_xyz_grid_dist2), new_xyz_grid_idxs, torch.tensor(avg_length_of_neighbor_idxs) + + +three_nn_for_vector_pool_by_two_step = ThreeNNForVectorPoolByTwoStep.apply + + +class VectorPoolWithVoxelQuery(Function): + @staticmethod + def forward(ctx, support_xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor, support_features: torch.Tensor, + new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor, num_grid_x, num_grid_y, num_grid_z, + max_neighbour_distance, num_c_out_each_grid, use_xyz, + num_mean_points_per_grid=100, nsample=-1, neighbor_type=0, pooling_type=0): + """ + Args: + ctx: + support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features + xyz_batch_cnt: (batch_size), [N1, N2, ...] + support_features: (N1 + N2 ..., C) + new_xyz: (M1 + M2 ..., 3) centers of new positions + new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
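Reviewer note: both the two-step three-NN above and `VectorPoolWithVoxelQuery` below size their scratch buffers from an estimated per-point average and rerun the kernel when the true total overflows. The control flow, reduced to a Python sketch (`run_kernel` is a stand-in callable, not a real API):

```python
def run_with_growing_buffer(run_kernel, num_items, avg_per_item):
    """Retry pattern: allocate from an estimate, re-estimate and rerun on overflow."""
    while True:
        capacity = avg_per_item * num_items
        total_written = run_kernel(capacity)      # kernel reports how many slots it needed
        # ceil-divide to get the new per-item average, as the wrappers above do
        avg_per_item = total_written // num_items + int(total_written % num_items > 0)
        if total_written <= capacity:
            return total_written, avg_per_item

demo = lambda capacity: 900                       # toy kernel that always needs 900 slots
total, avg = run_with_growing_buffer(demo, num_items=100, avg_per_item=5)
assert total == 900 and avg == 9                  # first pass overflows (500 < 900), second fits
```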
+ num_grid_x: number of grids in each local area centered at new_xyz + num_grid_y: + num_grid_z: + max_neighbour_distance: + num_c_out_each_grid: + use_xyz: + neighbor_type: 1: ball, others: cube: + pooling_type: 0: avg_pool, 1: random choice + Returns: + new_features: (M1 + M2 ..., num_c_out) + """ + assert support_xyz.is_contiguous() + assert support_features.is_contiguous() + assert xyz_batch_cnt.is_contiguous() + assert new_xyz.is_contiguous() + assert new_xyz_batch_cnt.is_contiguous() + num_total_grids = num_grid_x * num_grid_y * num_grid_z + num_c_out = num_c_out_each_grid * num_total_grids + N, num_c_in = support_features.shape + M = new_xyz.shape[0] + + assert num_c_in % num_c_out_each_grid == 0, \ + f'the input channels ({num_c_in}) should be an integral multiple of num_c_out_each_grid({num_c_out_each_grid})' + + while True: + new_features = support_features.new_zeros((M, num_c_out)) + new_local_xyz = support_features.new_zeros((M, 3 * num_total_grids)) + point_cnt_of_grid = xyz_batch_cnt.new_zeros((M, num_total_grids)) + + num_max_sum_points = num_mean_points_per_grid * M + grouped_idxs = xyz_batch_cnt.new_zeros((num_max_sum_points, 3)) + + num_cum_sum = pointnet2.vector_pool_wrapper( + support_xyz, xyz_batch_cnt, support_features, new_xyz, new_xyz_batch_cnt, + new_features, new_local_xyz, point_cnt_of_grid, grouped_idxs, + num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance, use_xyz, + num_max_sum_points, nsample, neighbor_type, pooling_type + ) + num_mean_points_per_grid = num_cum_sum // M + int(num_cum_sum % M > 0) + if num_cum_sum <= num_max_sum_points: + break + + grouped_idxs = grouped_idxs[:num_cum_sum] + + normalizer = torch.clamp_min(point_cnt_of_grid[:, :, None].float(), min=1e-6) + new_features = (new_features.view(-1, num_total_grids, num_c_out_each_grid) / normalizer).view(-1, num_c_out) + + if use_xyz: + new_local_xyz = (new_local_xyz.view(-1, num_total_grids, 3) / normalizer).view(-1, num_total_grids * 3) + + num_mean_points_per_grid = torch.Tensor([num_mean_points_per_grid]).int() + nsample = torch.Tensor([nsample]).int() + ctx.vector_pool_for_backward = (point_cnt_of_grid, grouped_idxs, N, num_c_in) + ctx.mark_non_differentiable(new_local_xyz, num_mean_points_per_grid, nsample, point_cnt_of_grid) + return new_features, new_local_xyz, num_mean_points_per_grid, point_cnt_of_grid + + @staticmethod + def backward(ctx, grad_new_features: torch.Tensor, grad_local_xyz: torch.Tensor, grad_num_cum_sum, grad_point_cnt_of_grid): + """ + Args: + ctx: + grad_new_features: (M1 + M2 ..., num_c_out), num_c_out = num_c_out_each_grid * num_total_grids + + Returns: + grad_support_features: (N1 + N2 ..., C_in) + """ + point_cnt_of_grid, grouped_idxs, N, num_c_in = ctx.vector_pool_for_backward + grad_support_features = grad_new_features.new_zeros((N, num_c_in)) + + if grouped_idxs.shape[0] > 0: + pointnet2.vector_pool_grad_wrapper( + grad_new_features.contiguous(), point_cnt_of_grid, grouped_idxs, + grad_support_features + ) + + return None, None, grad_support_features, None, None, None, None, None, None, None, None, None, None, None, None + + +vector_pool_with_voxel_query_op = VectorPoolWithVoxelQuery.apply + + +if __name__ == '__main__': + pass diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp new file mode 100644 index 000000000..3376f75fa --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query.cpp @@ -0,0 +1,45 @@ +/* 
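Reviewer note: the normalization step in `VectorPoolWithVoxelQuery.forward` turns the kernel's per-grid feature sums into averages while guarding empty grids. The same arithmetic in isolation, with hypothetical sizes:

```python
import torch

M, num_total_grids, num_c_each = 2, 4, 3
new_features = torch.rand(M, num_total_grids * num_c_each)      # per-grid sums from the kernel
point_cnt_of_grid = torch.tensor([[0, 1, 2, 3], [4, 0, 1, 2]])  # points that fell in each grid

normalizer = torch.clamp_min(point_cnt_of_grid[:, :, None].float(), min=1e-6)
avg = (new_features.view(-1, num_total_grids, num_c_each) / normalizer).view(-1, num_total_grids * num_c_each)
# empty grids (count 0) stay near zero instead of dividing by zero
```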
+Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include +#include +#include "ball_query_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int ball_query_wrapper_stack(int B, int M, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, + at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor) { + CHECK_INPUT(new_xyz_tensor); + CHECK_INPUT(xyz_tensor); + CHECK_INPUT(new_xyz_batch_cnt_tensor); + CHECK_INPUT(xyz_batch_cnt_tensor); + + const float *new_xyz = new_xyz_tensor.data(); + const float *xyz = xyz_tensor.data(); + const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data(); + const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); + int *idx = idx_tensor.data(); + + ball_query_kernel_launcher_stack(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); + return 1; +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.cu new file mode 100644 index 000000000..adaa6b1e8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.cu @@ -0,0 +1,90 @@ +/* +Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include + +#include "ball_query_gpu.h" +#include "cuda_utils.h" + + +__global__ void ball_query_kernel_stack(int B, int M, float radius, int nsample, \ + const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx) { + // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features + // :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query + // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
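Reviewer note: each CUDA thread below first recovers which sample its flat point index belongs to by walking the `batch_cnt` prefix sums. The same bookkeeping in Python, for reference:

```python
def locate_in_stack(pt_idx, batch_cnt):
    """Return (sample index, offset of that sample's first point) for a flat index."""
    bs_idx = 0
    pt_cnt = batch_cnt[0]
    for k in range(1, len(batch_cnt)):
        if pt_idx < pt_cnt:
            break
        pt_cnt += batch_cnt[k]
        bs_idx = k
    start = sum(batch_cnt[:bs_idx])
    return bs_idx, start

assert locate_in_stack(0, [5, 8]) == (0, 0)   # first point of sample 0
assert locate_in_stack(7, [5, 8]) == (1, 5)   # third point of sample 1
```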
+ // output: + // idx: (M, nsample) + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= M) return; + + int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0]; + for (int k = 1; k < B; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += new_xyz_batch_cnt[k]; + bs_idx = k; + } + + int xyz_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k]; + // for (int k = 0; k < bs_idx; k++) new_xyz_batch_start_idx += new_xyz_batch_cnt[k]; + + new_xyz += pt_idx * 3; + xyz += xyz_batch_start_idx * 3; + idx += pt_idx * nsample; + + float radius2 = radius * radius; + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + int n = xyz_batch_cnt[bs_idx]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 < radius2){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } + if (cnt == 0) idx[0] = -1; +} + + +void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample, + const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx){ + // :param xyz: (N1 + N2 ..., 3) xyz coordinates of the features + // :param xyz_batch_cnt: (batch_size), [N1, N2, ...] + // :param new_xyz: (M1 + M2 ..., 3) centers of the ball query + // :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...] + // output: + // idx: (M, nsample) + + cudaError_t err; + + dim3 blocks(DIVUP(M, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + ball_query_kernel_stack<<>>(B, M, radius, nsample, new_xyz, new_xyz_batch_cnt, xyz, xyz_batch_cnt, idx); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h new file mode 100644 index 000000000..c74f12018 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/ball_query_gpu.h @@ -0,0 +1,25 @@ +/* +Stacked-batch-data version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. 
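Reviewer note: the launcher below sizes its grid with `DIVUP(M, THREADS_PER_BLOCK)`; the macro from `cuda_utils.h` is ordinary ceiling division written with C integer arithmetic. A quick equivalence check:

```python
def divup(m, n):
    # mirrors `#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))` under C integer division
    return m // n + (m % n > 0)

assert all(divup(m, 256) == -(-m // 256) for m in (1, 255, 256, 257, 1000))
```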
+*/ + + +#ifndef _STACK_BALL_QUERY_GPU_H +#define _STACK_BALL_QUERY_GPU_H + +#include +#include +#include +#include + +int ball_query_wrapper_stack(int B, int M, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, + at::Tensor xyz_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor); + + +void ball_query_kernel_launcher_stack(int B, int M, float radius, int nsample, + const float *new_xyz, const int *new_xyz_batch_cnt, const float *xyz, const int *xyz_batch_cnt, int *idx); + + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h new file mode 100644 index 000000000..c1670f1c8 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/cuda_utils.h @@ -0,0 +1,9 @@ +#ifndef _STACK_CUDA_UTILS_H +#define _STACK_CUDA_UTILS_H + +#include + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp new file mode 100644 index 000000000..d882c597f --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points.cpp @@ -0,0 +1,68 @@ +/* +Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include +#include +#include "group_points_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, + at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor) { + + CHECK_INPUT(grad_out_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(idx_batch_cnt_tensor); + CHECK_INPUT(features_batch_cnt_tensor); + CHECK_INPUT(grad_features_tensor); + + const float *grad_out = grad_out_tensor.data(); + const int *idx = idx_tensor.data(); + const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); + const int *features_batch_cnt = features_batch_cnt_tensor.data(); + float *grad_features = grad_features_tensor.data(); + + group_points_grad_kernel_launcher_stack(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); + return 1; +} + + +int group_points_wrapper_stack(int B, int M, int C, int nsample, + at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, + at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor) { + + CHECK_INPUT(features_tensor); + CHECK_INPUT(features_batch_cnt_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(idx_batch_cnt_tensor); + CHECK_INPUT(out_tensor); + + const float *features = features_tensor.data(); + const int *idx = idx_tensor.data(); + const int *features_batch_cnt = features_batch_cnt_tensor.data(); + const int *idx_batch_cnt = idx_batch_cnt_tensor.data(); + float *out = out_tensor.data(); + + group_points_kernel_launcher_stack(B, M, C, 
nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); + return 1; +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.cu new file mode 100644 index 000000000..62e341e9c --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.cu @@ -0,0 +1,125 @@ +/* +Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include + +#include "cuda_utils.h" +#include "group_points_gpu.h" + + +__global__ void group_points_grad_kernel_stack(int B, int M, int C, int N, int nsample, + const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { + // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + // :return: + // grad_features: (N1 + N2 ..., C) gradient of the features + int index = blockIdx.x * blockDim.x + threadIdx.x; + int sample_idx = index % nsample; + int C_idx = (index / nsample) % C; + int pt_idx = (index / nsample / C); + + if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; + + int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; + for (int k = 1; k < B; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += idx_batch_cnt[k]; + bs_idx = k; + } + + int features_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; + + grad_out += pt_idx * C * nsample + C_idx * nsample + sample_idx; + idx += pt_idx * nsample + sample_idx; + grad_features += (features_batch_start_idx + idx[0]) * C + C_idx; + + atomicAdd(grad_features, grad_out[0]); +} + +void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, + const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features) { + // :param grad_out: (M1 + M2 ..., C, nsample) tensor of the gradients of the output from forward + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] 
tensor containing the indicies of features to group with + // :return: + // grad_features: (N1 + N2 ..., C) gradient of the features + + cudaError_t err; + // dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_grad_kernel_stack<<>>(B, M, C, N, nsample, grad_out, idx, idx_batch_cnt, features_batch_cnt, grad_features); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void group_points_kernel_stack(int B, int M, int C, int nsample, + const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { + // :param features: (N1 + N2 ..., C) tensor of features to group + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] tensor containing the indicies of features to group with + // :return: + // output: (M1 + M2, C, nsample) tensor + int index = blockIdx.x * blockDim.x + threadIdx.x; + int sample_idx = index % nsample; + int C_idx = (index / nsample) % C; + int pt_idx = (index / nsample / C); + + if (pt_idx >= M || C_idx >= C || sample_idx >= nsample) return; + + int bs_idx = 0, pt_cnt = idx_batch_cnt[0]; + for (int k = 1; k < B; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += idx_batch_cnt[k]; + bs_idx = k; + } + + int features_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) features_batch_start_idx += features_batch_cnt[k]; + features += features_batch_start_idx * C; + + idx += pt_idx * nsample + sample_idx; + int in_idx = idx[0] * C + C_idx; + int out_idx = pt_idx * C * nsample + C_idx * nsample + sample_idx; + + out[out_idx] = features[in_idx]; +} + + +void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, + const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out) { + // :param features: (N1 + N2 ..., C) tensor of features to group + // :param features_batch_cnt: (batch_size) [N1 + N2 ...] tensor containing the indicies of features to group with + // :param idx: (M1 + M2 ..., nsample) tensor containing the indicies of features to group with + // :param idx_batch_cnt: (batch_size) [M1 + M2 ...] 
tensor containing the indicies of features to group with + // :return: + // output: (M1 + M2, C, nsample) tensor + + cudaError_t err; + dim3 blocks(DIVUP(M * C * nsample, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_kernel_stack<<>>(B, M, C, nsample, features, features_batch_cnt, idx, idx_batch_cnt, out); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h new file mode 100644 index 000000000..4a2662167 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/group_points_gpu.h @@ -0,0 +1,31 @@ +/* +Stacked-batch-data version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#ifndef _STACK_GROUP_POINTS_GPU_H +#define _STACK_GROUP_POINTS_GPU_H + +#include +#include +#include +#include + + +int group_points_wrapper_stack(int B, int M, int C, int nsample, + at::Tensor features_tensor, at::Tensor features_batch_cnt_tensor, + at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, at::Tensor out_tensor); + +void group_points_kernel_launcher_stack(int B, int M, int C, int nsample, + const float *features, const int *features_batch_cnt, const int *idx, const int *idx_batch_cnt, float *out); + +int group_points_grad_wrapper_stack(int B, int M, int C, int N, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor idx_batch_cnt_tensor, + at::Tensor features_batch_cnt_tensor, at::Tensor grad_features_tensor); + +void group_points_grad_kernel_launcher_stack(int B, int M, int C, int N, int nsample, + const float *grad_out, const int *idx, const int *idx_batch_cnt, const int *features_batch_cnt, float *grad_features); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate.cpp new file mode 100644 index 000000000..db9a41ae7 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate.cpp @@ -0,0 +1,107 @@ +/* +Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include +#include +#include +#include +#include +#include "interpolate_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +void three_nn_wrapper_stack(at::Tensor unknown_tensor, + at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor, + at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor){ + // unknown: (N1 + N2 ..., 3) + // unknown_batch_cnt: (batch_size), [N1, N2, ...] + // known: (M1 + M2 ..., 3) + // known_batch_cnt: (batch_size), [M1, M2, ...] 
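Reviewer note: per sample, the grouping kernels in this file are a plain gather (forward) and its transpose scatter-add (backward). A single-sample CPU reference for the forward gather, with my own names and toy sizes:

```python
import torch

def group_points_cpu(features, idx):
    """(N, C), (M, nsample) -> (M, C, nsample): gather rows, then move channels in front."""
    return features[idx].permute(0, 2, 1).contiguous()

features, idx = torch.rand(10, 4), torch.randint(0, 10, (6, 16))
out = group_points_cpu(features, idx)
assert out.shape == (6, 4, 16)
```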
+ // Return: + // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + // idx: (N1 + N2 ..., 3) index of the three nearest neighbors + CHECK_INPUT(unknown_tensor); + CHECK_INPUT(unknown_batch_cnt_tensor); + CHECK_INPUT(known_tensor); + CHECK_INPUT(known_batch_cnt_tensor); + CHECK_INPUT(dist2_tensor); + CHECK_INPUT(idx_tensor); + + int batch_size = unknown_batch_cnt_tensor.size(0); + int N = unknown_tensor.size(0); + int M = known_tensor.size(0); + const float *unknown = unknown_tensor.data(); + const int *unknown_batch_cnt = unknown_batch_cnt_tensor.data(); + const float *known = known_tensor.data(); + const int *known_batch_cnt = known_batch_cnt_tensor.data(); + float *dist2 = dist2_tensor.data(); + int *idx = idx_tensor.data(); + + three_nn_kernel_launcher_stack(batch_size, N, M, unknown, unknown_batch_cnt, known, known_batch_cnt, dist2, idx); +} + + +void three_interpolate_wrapper_stack(at::Tensor features_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor) { + // features_tensor: (M1 + M2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // out_tensor: (N1 + N2 ..., C) + CHECK_INPUT(features_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(weight_tensor); + CHECK_INPUT(out_tensor); + + int N = out_tensor.size(0); + int channels = features_tensor.size(1); + const float *features = features_tensor.data(); + const float *weight = weight_tensor.data(); + const int *idx = idx_tensor.data(); + float *out = out_tensor.data(); + + three_interpolate_kernel_launcher_stack(N, channels, features, idx, weight, out); +} + + +void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor, + at::Tensor weight_tensor, at::Tensor grad_features_tensor) { + // grad_out_tensor: (N1 + N2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // grad_features_tensor: (M1 + M2 ..., C) + CHECK_INPUT(grad_out_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(weight_tensor); + CHECK_INPUT(grad_features_tensor); + + int N = grad_out_tensor.size(0); + int channels = grad_out_tensor.size(1); + const float *grad_out = grad_out_tensor.data(); + const float *weight = weight_tensor.data(); + const int *idx = idx_tensor.data(); + float *grad_features = grad_features_tensor.data(); + + // printf("N=%d, channels=%d\n", N, channels); + three_interpolate_grad_kernel_launcher_stack(N, channels, grad_out, idx, weight, grad_features); +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.cu new file mode 100644 index 000000000..b87fa3460 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.cu @@ -0,0 +1,196 @@ +/* +Stacked-batch-data version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include +#include +#include + +#include "cuda_utils.h" +#include "interpolate_gpu.h" + + +__global__ void three_nn_kernel_stack(int batch_size, int N, int M, const float *unknown, + const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, + float *dist2, int *idx) { + // unknown: (N1 + N2 ..., 3) + // unknown_batch_cnt: (batch_size), [N1, N2, ...] + // known: (M1 + M2 ..., 3) + // known_batch_cnt: (batch_size), [M1, M2, ...] 
+ // Return: + // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + // idx: (N1 + N2 ..., 3) index of the three nearest neighbors + + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= N) return; + + int bs_idx = 0, pt_cnt = unknown_batch_cnt[0]; + for (int k = 1; k < batch_size; k++){ + if (pt_idx < pt_cnt) break; + pt_cnt += unknown_batch_cnt[k]; + bs_idx = k; + } + + int cur_num_known_points = known_batch_cnt[bs_idx]; + + int known_batch_start_idx = 0; + for (int k = 0; k < bs_idx; k++) known_batch_start_idx += known_batch_cnt[k]; + + known += known_batch_start_idx * 3; + unknown += pt_idx * 3; + dist2 += pt_idx * 3; + idx += pt_idx * 3; + + float ux = unknown[0]; + float uy = unknown[1]; + float uz = unknown[2]; + + float best1 = FLT_MAX, best2 = FLT_MAX, best3 = FLT_MAX; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < cur_num_known_points; ++k) { + float x = known[k * 3 + 0]; + float y = known[k * 3 + 1]; + float z = known[k * 3 + 2]; + float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; besti3 = besti2; + best2 = best1; besti2 = besti1; + best1 = d; besti1 = k; + } + else if (d < best2) { + best3 = best2; besti3 = besti2; + best2 = d; besti2 = k; + } + else if (d < best3) { + best3 = d; besti3 = k; + } + } + dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; + idx[0] = besti1 + known_batch_start_idx; + idx[1] = besti2 + known_batch_start_idx; + idx[2] = besti3 + known_batch_start_idx; +} + + +void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, + const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, + float *dist2, int *idx) { + // unknown: (N1 + N2 ..., 3) + // unknown_batch_cnt: (batch_size), [N1, N2, ...] + // known: (M1 + M2 ..., 3) + // known_batch_cnt: (batch_size), [M1, M2, ...] 
+ // Return: + // dist: (N1 + N2 ..., 3) l2 distance to the three nearest neighbors + // idx: (N1 + N2 ..., 3) index of the three nearest neighbors + + cudaError_t err; + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + three_nn_kernel_stack<<>>( + batch_size, N, M, unknown, unknown_batch_cnt, + known, known_batch_cnt, dist2, idx + ); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + + +__global__ void three_interpolate_kernel_stack(int N, int channels, const float *features, + const int *idx, const float *weight, float *out) { + // features: (M1 + M2 ..., C) + // idx: [N1 + N2 ..., 3] + // weight: [N1 + N2 ..., 3] + // Return: + // out: (N1 + N2 ..., C) + + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= N || c_idx >= channels) return; + + weight += pt_idx * 3; + idx += pt_idx * 3; + out += pt_idx * channels + c_idx; + + out[0] = weight[0] * features[idx[0] * channels + c_idx] + + weight[1] * features[idx[1] * channels + c_idx] + + weight[2] * features[idx[2] * channels + c_idx]; +} + + + +void three_interpolate_kernel_launcher_stack(int N, int channels, + const float *features, const int *idx, const float *weight, float *out) { + // features: (M1 + M2 ..., C) + // idx: [N1 + N2 ..., 3] + // weight: [N1 + N2 ..., 3] + // Return: + // out: (N1 + N2 ..., C) + + cudaError_t err; + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_kernel_stack<<>>(N, channels, features, idx, weight, out); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void three_interpolate_grad_kernel_stack(int N, int channels, const float *grad_out, + const int *idx, const float *weight, float *grad_features) { + // grad_out_tensor: (N1 + N2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // grad_features_tensor: (M1 + M2 ..., C) + + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (pt_idx >= N || c_idx >= channels) return; + + grad_out += pt_idx * channels + c_idx; + weight += pt_idx * 3; + idx += pt_idx * 3; + + // printf("pt_idx=%d, c_idx=%d, idx=(%d, %d, %d), grad_out=%f\n", pt_idx, c_idx, idx[0], idx[1], idx[2], grad_out[0]); + + atomicAdd(grad_features + idx[0] * channels + c_idx, grad_out[0] * weight[0]); + atomicAdd(grad_features + idx[1] * channels + c_idx, grad_out[0] * weight[1]); + atomicAdd(grad_features + idx[2] * channels + c_idx, grad_out[0] * weight[2]); +} + + +void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, + const int *idx, const float *weight, float *grad_features) { + // grad_out_tensor: (N1 + N2 ..., C) + // idx_tensor: [N1 + N2 ..., 3] + // weight_tensor: [N1 + N2 ..., 3] + // Return: + // grad_features_tensor: (M1 + M2 ..., C) + + cudaError_t err; + dim3 blocks(DIVUP(N, THREADS_PER_BLOCK), channels); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_grad_kernel_stack<<>>( + N, channels, grad_out, idx, weight, grad_features + ); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} \ No newline at end of file diff --git 
a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h new file mode 100644 index 000000000..12775ec33 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/interpolate_gpu.h @@ -0,0 +1,39 @@ +#ifndef _INTERPOLATE_GPU_H +#define _INTERPOLATE_GPU_H + +#include +#include +#include +#include + + +void three_nn_wrapper_stack(at::Tensor unknown_tensor, + at::Tensor unknown_batch_cnt_tensor, at::Tensor known_tensor, + at::Tensor known_batch_cnt_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); + + +void three_interpolate_wrapper_stack(at::Tensor features_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); + + + +void three_interpolate_grad_wrapper_stack(at::Tensor grad_out_tensor, at::Tensor idx_tensor, + at::Tensor weight_tensor, at::Tensor grad_features_tensor); + + +void three_nn_kernel_launcher_stack(int batch_size, int N, int M, const float *unknown, + const int *unknown_batch_cnt, const float *known, const int *known_batch_cnt, + float *dist2, int *idx); + + +void three_interpolate_kernel_launcher_stack(int N, int channels, + const float *features, const int *idx, const float *weight, float *out); + + + +void three_interpolate_grad_kernel_launcher_stack(int N, int channels, const float *grad_out, + const int *idx, const float *weight, float *grad_features); + + + +#endif \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp new file mode 100644 index 000000000..1b61e4158 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/pointnet2_api.cpp @@ -0,0 +1,31 @@ +#include +#include + +#include "ball_query_gpu.h" +#include "group_points_gpu.h" +#include "sampling_gpu.h" +#include "interpolate_gpu.h" +#include "voxel_query_gpu.h" +#include "vector_pool_gpu.h" + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ball_query_wrapper", &ball_query_wrapper_stack, "ball_query_wrapper_stack"); + m.def("voxel_query_wrapper", &voxel_query_wrapper_stack, "voxel_query_wrapper_stack"); + + m.def("farthest_point_sampling_wrapper", &farthest_point_sampling_wrapper, "farthest_point_sampling_wrapper"); + m.def("stack_farthest_point_sampling_wrapper", &stack_farthest_point_sampling_wrapper, "stack_farthest_point_sampling_wrapper"); + + m.def("group_points_wrapper", &group_points_wrapper_stack, "group_points_wrapper_stack"); + m.def("group_points_grad_wrapper", &group_points_grad_wrapper_stack, "group_points_grad_wrapper_stack"); + + m.def("three_nn_wrapper", &three_nn_wrapper_stack, "three_nn_wrapper_stack"); + m.def("three_interpolate_wrapper", &three_interpolate_wrapper_stack, "three_interpolate_wrapper_stack"); + m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_stack, "three_interpolate_grad_wrapper_stack"); + + m.def("query_stacked_local_neighbor_idxs_wrapper_stack", &query_stacked_local_neighbor_idxs_wrapper_stack, "query_stacked_local_neighbor_idxs_wrapper_stack"); + m.def("query_three_nn_by_stacked_local_idxs_wrapper_stack", &query_three_nn_by_stacked_local_idxs_wrapper_stack, "query_three_nn_by_stacked_local_idxs_wrapper_stack"); + + m.def("vector_pool_wrapper", &vector_pool_wrapper_stack, "vector_pool_grad_wrapper_stack"); + m.def("vector_pool_grad_wrapper", &vector_pool_grad_wrapper_stack, "vector_pool_grad_wrapper_stack"); +} diff --git 
a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp new file mode 100644 index 000000000..6bc6b43b6 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling.cpp @@ -0,0 +1,57 @@ +#include +#include +#include +#include "sampling_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int farthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { + + CHECK_INPUT(points_tensor); + CHECK_INPUT(temp_tensor); + CHECK_INPUT(idx_tensor); + + const float *points = points_tensor.data(); + float *temp = temp_tensor.data(); + int *idx = idx_tensor.data(); + + farthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); + return 1; +} + + +int stack_farthest_point_sampling_wrapper(at::Tensor points_tensor, + at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, at::Tensor idx_tensor, + at::Tensor num_sampled_points_tensor) { + + CHECK_INPUT(points_tensor); + CHECK_INPUT(temp_tensor); + CHECK_INPUT(idx_tensor); + CHECK_INPUT(xyz_batch_cnt_tensor); + CHECK_INPUT(num_sampled_points_tensor); + + int batch_size = xyz_batch_cnt_tensor.size(0); + int N = points_tensor.size(0); + const float *points = points_tensor.data(); + float *temp = temp_tensor.data(); + int *xyz_batch_cnt = xyz_batch_cnt_tensor.data(); + int *idx = idx_tensor.data(); + int *num_sampled_points = num_sampled_points_tensor.data(); + + stack_farthest_point_sampling_kernel_launcher(N, batch_size, points, temp, xyz_batch_cnt, idx, num_sampled_points); + return 1; +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.cu new file mode 100644 index 000000000..1629276ca --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.cu @@ -0,0 +1,350 @@ +#include +#include +#include + +#include "cuda_utils.h" +#include "sampling_gpu.h" +#define TOTAL_THREADS 1024 + + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + + return std::max(std::min(1 << pow_2, TOTAL_THREADS), 1); +} + + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = std::max(v1, v2); + dists_i[idx1] = v2 > v1 ? 
i2 : i1; +} + + +template +__global__ void farthest_point_sampling_kernel(int b, int n, int m, + const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) + idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + // if (mag <= 1e-3) + // continue; + + float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = std::min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 1024) { + if (tid < 512) { + __update(dists, dists_i, tid, tid + 512); + } + __syncthreads(); + } + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) + idxs[j] = old; + } +} + +void farthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + cudaError_t err; + unsigned int n_threads = opt_n_threads(n); + + switch (n_threads) { + case 1024: + farthest_point_sampling_kernel<1024><<>>(b, n, m, dataset, temp, idxs); break; + case 512: + farthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); break; + case 256: + farthest_point_sampling_kernel<256><<>>(b, n, m, dataset, temp, idxs); break; + case 128: + farthest_point_sampling_kernel<128><<>>(b, n, m, dataset, temp, idxs); break; + case 64: + farthest_point_sampling_kernel<64><<>>(b, n, m, dataset, temp, idxs); break; + case 32: + farthest_point_sampling_kernel<32><<>>(b, n, m, dataset, temp, idxs); break; + case 16: + farthest_point_sampling_kernel<16><<>>(b, n, m, dataset, temp, idxs); break; + case 8: + farthest_point_sampling_kernel<8><<>>(b, n, m, dataset, temp, idxs); break; + case 4: + 
farthest_point_sampling_kernel<4><<>>(b, n, m, dataset, temp, idxs); break; + case 2: + farthest_point_sampling_kernel<2><<>>(b, n, m, dataset, temp, idxs); break; + case 1: + farthest_point_sampling_kernel<1><<>>(b, n, m, dataset, temp, idxs); break; + default: + farthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); + } + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +template +__global__ void stack_farthest_point_sampling_kernel(int batch_size, int N, + const float *dataset, float *temp, int *xyz_batch_cnt, int *idxs, int *num_sampled_points) { + // """ + // Args: + // ctx: + // dataset: (N1 + N2 + ..., 3) where N > npoint + // temp: (N1 + N2 + ...) where N > npoint + // xyz_batch_cnt: [N1, N2, ...] + // num_sampled_points: [M1, M2, ...] int, number of features in the sampled set + + // Returns: + // idxs: (npoint.sum()) tensor containing the set, + // npoint: (M1, M2, ...) + // """ + + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int bs_idx = blockIdx.x; + + int xyz_batch_start_idx = 0, idxs_start_idx = 0; + for (int k = 0; k < bs_idx; k++){ + xyz_batch_start_idx += xyz_batch_cnt[k]; + idxs_start_idx += num_sampled_points[k]; + } + + dataset += xyz_batch_start_idx * 3; + temp += xyz_batch_start_idx; + idxs += idxs_start_idx; + + int n = xyz_batch_cnt[bs_idx]; + int m = num_sampled_points[bs_idx]; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) idxs[0] = xyz_batch_start_idx; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + // if (mag <= 1e-3) + // continue; + + float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = std::min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? 
d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 1024) { + if (tid < 512) { + __update(dists, dists_i, tid, tid + 512); + } + __syncthreads(); + } + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) + idxs[j] = old + xyz_batch_start_idx; + } +} + + +void stack_farthest_point_sampling_kernel_launcher(int N, int batch_size, + const float *dataset, float *temp, int *xyz_batch_cnt, int *idxs, int *num_sampled_points) { + // """ + // Args: + // ctx: + // dataset: (N1 + N2 + ..., 3) where N > npoint + // temp: (N1 + N2 + ...) where N > npoint + // xyz_batch_cnt: [N1, N2, ...] + // npoint: int, number of features in the sampled set + + // Returns: + // idxs: (npoint.sum()) tensor containing the set, + // npoint: (M1, M2, ...) 
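Reviewer note: the kernels above parallelize the classic farthest-point iteration with a shared-memory arg-max reduction; the underlying algorithm, as a single-sample CPU reference seeded at index 0 like the kernel:

```python
import torch

def farthest_point_sample_cpu(xyz, npoint):
    """(N, 3) -> (npoint,) indices via greedy max-min distance selection."""
    N = xyz.shape[0]
    idxs = torch.zeros(npoint, dtype=torch.int64)   # idxs[0] = 0, matching the kernel's seed
    temp = torch.full((N,), 1e10)                   # running min distance to the chosen set
    old = 0
    for j in range(1, npoint):
        d = ((xyz - xyz[old]) ** 2).sum(dim=1)      # squared distance to the latest pick
        temp = torch.minimum(temp, d)
        old = int(torch.argmax(temp))               # farthest point from the chosen set
        idxs[j] = old
    return idxs

pts = torch.rand(100, 3)
assert farthest_point_sample_cpu(pts, 8).shape == (8,)
```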
+ // """ + + cudaError_t err; + unsigned int n_threads = opt_n_threads(N); + + stack_farthest_point_sampling_kernel<1024><<>>( + batch_size, N, dataset, temp, xyz_batch_cnt, idxs, num_sampled_points + ); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h new file mode 100644 index 000000000..c33996a40 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/sampling_gpu.h @@ -0,0 +1,23 @@ +#ifndef _SAMPLING_GPU_H +#define _SAMPLING_GPU_H + +#include +#include +#include + + +int farthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); + +void farthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs); + +int stack_farthest_point_sampling_wrapper( + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor xyz_batch_cnt_tensor, + at::Tensor idx_tensor, at::Tensor num_sampled_points_tensor); + + +void stack_farthest_point_sampling_kernel_launcher(int N, int batch_size, + const float *dataset, float *temp, int *xyz_batch_cnt, int *idxs, int *num_sampled_points); + +#endif diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp new file mode 100644 index 000000000..f3fed5ebd --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp @@ -0,0 +1,200 @@ +/* +Vector-pool aggregation based local feature aggregation for point cloud. +PV-RCNN++: Point-Voxel Feature Set Abstraction With Local Vector Representation for 3D Object Detection +https://arxiv.org/abs/2102.00463 + +Written by Shaoshuai Shi +All Rights Reserved 2020. +*/ + + +#include +#include +#include +#include +#include "vector_pool_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int query_stacked_local_neighbor_idxs_wrapper_stack(at::Tensor support_xyz_tensor, at::Tensor xyz_batch_cnt_tensor, + at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor, + at::Tensor stack_neighbor_idxs_tensor, at::Tensor start_len_tensor, at::Tensor cumsum_tensor, + int avg_length_of_neighbor_idxs, float max_neighbour_distance, int nsample, int neighbor_type){ + // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features + // xyz_batch_cnt: (batch_size), [N1, N2, ...] + // new_xyz: (M1 + M2 ..., 3) centers of the ball query + // new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid + // new_xyz_batch_cnt: (batch_size), [M1, M2, ...] 
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp
new file mode 100644
index 000000000..f3fed5ebd
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool.cpp
@@ -0,0 +1,200 @@
+/*
+Vector-pool aggregation based local feature aggregation for point cloud.
+PV-RCNN++: Point-Voxel Feature Set Abstraction With Local Vector Representation for 3D Object Detection
+https://arxiv.org/abs/2102.00463
+
+Written by Shaoshuai Shi
+All Rights Reserved 2020.
+*/
+
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <torch/extension.h>
+#include "vector_pool_gpu.h"
+
+#define CHECK_CUDA(x) do { \
+    if (!x.type().is_cuda()) { \
+        fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
+        exit(-1); \
+    } \
+} while (0)
+#define CHECK_CONTIGUOUS(x) do { \
+    if (!x.is_contiguous()) { \
+        fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
+        exit(-1); \
+    } \
+} while (0)
+#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
+
+
+int query_stacked_local_neighbor_idxs_wrapper_stack(at::Tensor support_xyz_tensor, at::Tensor xyz_batch_cnt_tensor,
+    at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
+    at::Tensor stack_neighbor_idxs_tensor, at::Tensor start_len_tensor, at::Tensor cumsum_tensor,
+    int avg_length_of_neighbor_idxs, float max_neighbour_distance, int nsample, int neighbor_type){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // xyz_batch_cnt: (batch_size), [N1, N2, ...]
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+    // stack_neighbor_idxs: (max_length_of_neighbor_idxs), flat buffer of gathered neighbor indices
+    // start_len: (M1 + M2, 2) [start_offset, neighbor_length] of each center in stack_neighbor_idxs
+    // cumsum: (1), running write offset into stack_neighbor_idxs
+    // nsample: find all (-1), find limited number(>0)
+    // neighbor_type: 1: ball, others: cube
+
+    CHECK_INPUT(support_xyz_tensor);
+    CHECK_INPUT(xyz_batch_cnt_tensor);
+    CHECK_INPUT(new_xyz_tensor);
+    CHECK_INPUT(new_xyz_batch_cnt_tensor);
+    CHECK_INPUT(stack_neighbor_idxs_tensor);
+    CHECK_INPUT(start_len_tensor);
+    CHECK_INPUT(cumsum_tensor);
+
+    const float *support_xyz = support_xyz_tensor.data<float>();
+    const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data<int>();
+    const float *new_xyz = new_xyz_tensor.data<float>();
+    const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data<int>();
+    int *stack_neighbor_idxs = stack_neighbor_idxs_tensor.data<int>();
+    int *start_len = start_len_tensor.data<int>();
+    int *cumsum = cumsum_tensor.data<int>();
+
+    int batch_size = xyz_batch_cnt_tensor.size(0);
+    int M = new_xyz_tensor.size(0);
+
+    query_stacked_local_neighbor_idxs_kernel_launcher_stack(
+        support_xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
+        stack_neighbor_idxs, start_len, cumsum, avg_length_of_neighbor_idxs,
+        max_neighbour_distance, batch_size, M, nsample, neighbor_type
+    );
+    return 0;
+}
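[Editor's note] Illustrative sketch (ours, not part of the patch) of the ragged layout this wrapper fills: a flat index buffer plus per-center (start, len) pairs.

import torch

def gather_neighbors(stack_neighbor_idxs, start_len, center_idx):
    # start_len: (M, 2) int tensor of [start_offset, neighbor_length] per center
    start, length = start_len[center_idx].tolist()
    return stack_neighbor_idxs[start:start + length]  # neighbor point indices of one center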
+int query_three_nn_by_stacked_local_idxs_wrapper_stack(at::Tensor support_xyz_tensor,
+    at::Tensor new_xyz_tensor, at::Tensor new_xyz_grid_centers_tensor,
+    at::Tensor new_xyz_grid_idxs_tensor, at::Tensor new_xyz_grid_dist2_tensor,
+    at::Tensor stack_neighbor_idxs_tensor, at::Tensor start_len_tensor,
+    int M, int num_total_grids){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
+    // new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
+    // new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
+    // stack_neighbor_idxs: (max_length_of_neighbor_idxs)
+    // start_len: (M1 + M2, 2) [start_offset, neighbor_length]
+
+    CHECK_INPUT(support_xyz_tensor);
+    CHECK_INPUT(new_xyz_tensor);
+    CHECK_INPUT(new_xyz_grid_centers_tensor);
+    CHECK_INPUT(new_xyz_grid_idxs_tensor);
+    CHECK_INPUT(new_xyz_grid_dist2_tensor);
+    CHECK_INPUT(stack_neighbor_idxs_tensor);
+    CHECK_INPUT(start_len_tensor);
+
+    const float *support_xyz = support_xyz_tensor.data<float>();
+    const float *new_xyz = new_xyz_tensor.data<float>();
+    const float *new_xyz_grid_centers = new_xyz_grid_centers_tensor.data<float>();
+    int *new_xyz_grid_idxs = new_xyz_grid_idxs_tensor.data<int>();
+    float *new_xyz_grid_dist2 = new_xyz_grid_dist2_tensor.data<float>();
+    int *stack_neighbor_idxs = stack_neighbor_idxs_tensor.data<int>();
+    int *start_len = start_len_tensor.data<int>();
+
+    query_three_nn_by_stacked_local_idxs_kernel_launcher_stack(
+        support_xyz, new_xyz, new_xyz_grid_centers,
+        new_xyz_grid_idxs, new_xyz_grid_dist2, stack_neighbor_idxs, start_len,
+        M, num_total_grids
+    );
+    return 0;
+}
+
+
+int vector_pool_wrapper_stack(at::Tensor support_xyz_tensor, at::Tensor xyz_batch_cnt_tensor,
+    at::Tensor support_features_tensor, at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
+    at::Tensor new_features_tensor, at::Tensor new_local_xyz_tensor,
+    at::Tensor point_cnt_of_grid_tensor, at::Tensor grouped_idxs_tensor,
+    int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance, int use_xyz,
+    int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
+    // support_xyz_tensor: (N1 + N2 ..., 3) xyz coordinates of the features
+    // support_features_tensor: (N1 + N2 ..., C)
+    // xyz_batch_cnt: (batch_size), [N1, N2, ...]
+    // new_xyz_tensor: (M1 + M2 ..., 3) centers of new positions
+    // new_features_tensor: (M1 + M2 ..., C)
+    // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+    // point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
+    // grouped_idxs_tensor: (num_max_sum_points, 3)
+    // num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
+    // use_xyz: whether to calculate new_local_xyz
+    // neighbor_type: 1: ball, others: cube
+    // pooling_type: 0: avg_pool, 1: random choice
+
+    CHECK_INPUT(support_xyz_tensor);
+    CHECK_INPUT(support_features_tensor);
+    CHECK_INPUT(xyz_batch_cnt_tensor);
+    CHECK_INPUT(new_xyz_tensor);
+    CHECK_INPUT(new_xyz_batch_cnt_tensor);
+    CHECK_INPUT(new_features_tensor);
+    CHECK_INPUT(new_local_xyz_tensor);
+    CHECK_INPUT(point_cnt_of_grid_tensor);
+    CHECK_INPUT(grouped_idxs_tensor);
+
+    const float *support_xyz = support_xyz_tensor.data<float>();
+    const float *support_features = support_features_tensor.data<float>();
+    const int *xyz_batch_cnt = xyz_batch_cnt_tensor.data<int>();
+    const float *new_xyz = new_xyz_tensor.data<float>();
+    const int *new_xyz_batch_cnt = new_xyz_batch_cnt_tensor.data<int>();
+    float *new_features = new_features_tensor.data<float>();
+    float *new_local_xyz = new_local_xyz_tensor.data<float>();
+    int *point_cnt_of_grid = point_cnt_of_grid_tensor.data<int>();
+    int *grouped_idxs = grouped_idxs_tensor.data<int>();
+
+    int N = support_xyz_tensor.size(0);
+    int batch_size = xyz_batch_cnt_tensor.size(0);
+    int M = new_xyz_tensor.size(0);
+    int num_c_out = new_features_tensor.size(1);
+    int num_c_in = support_features_tensor.size(1);
+    int num_total_grids = point_cnt_of_grid_tensor.size(1);
+
+    int cum_sum = vector_pool_kernel_launcher_stack(
+        support_xyz, support_features, xyz_batch_cnt,
+        new_xyz, new_features, new_local_xyz, new_xyz_batch_cnt,
+        point_cnt_of_grid, grouped_idxs,
+        num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance,
+        batch_size, N, M, num_c_in, num_c_out, num_total_grids, use_xyz, num_max_sum_points, nsample, neighbor_type, pooling_type
+    );
+    return cum_sum;
+}
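[Editor's note] Minimal sketch (ours) of how vector pooling maps a neighbor's local offset into a sub-voxel grid slot, mirroring the grid_idx computation in vector_pool_kernel_stack later in this patch; the grid shape is an illustrative value.

import math

def local_offset_to_grid_idx(local_xyz, max_dist, num_grid=(3, 3, 3)):
    gx = int(math.floor((local_xyz[0] + max_dist) / (2 * max_dist / num_grid[0])))
    gy = int(math.floor((local_xyz[1] + max_dist) / (2 * max_dist / num_grid[1])))
    gz = int(math.floor((local_xyz[2] + max_dist) / (2 * max_dist / num_grid[2])))
    idx = gx * num_grid[1] * num_grid[2] + gy * num_grid[2] + gz
    # clamp exactly like the kernel does for boundary offsets
    return min(max(idx, 0), num_grid[0] * num_grid[1] * num_grid[2] - 1)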
+int vector_pool_grad_wrapper_stack(at::Tensor grad_new_features_tensor,
+    at::Tensor point_cnt_of_grid_tensor, at::Tensor grouped_idxs_tensor,
+    at::Tensor grad_support_features_tensor) {
+    // grad_new_features_tensor: (M1 + M2 ..., C_out)
+    // point_cnt_of_grid_tensor: (M1 + M2 ..., num_total_grids)
+    // grouped_idxs_tensor: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
+    // grad_support_features_tensor: (N1 + N2 ..., C_in)
+
+    CHECK_INPUT(grad_new_features_tensor);
+    CHECK_INPUT(point_cnt_of_grid_tensor);
+    CHECK_INPUT(grouped_idxs_tensor);
+    CHECK_INPUT(grad_support_features_tensor);
+
+    int M = grad_new_features_tensor.size(0);
+    int num_c_out = grad_new_features_tensor.size(1);
+    int N = grad_support_features_tensor.size(0);
+    int num_c_in = grad_support_features_tensor.size(1);
+    int num_total_grids = point_cnt_of_grid_tensor.size(1);
+    int num_max_sum_points = grouped_idxs_tensor.size(0);
+
+    const float *grad_new_features = grad_new_features_tensor.data<float>();
+    const int *point_cnt_of_grid = point_cnt_of_grid_tensor.data<int>();
+    const int *grouped_idxs = grouped_idxs_tensor.data<int>();
+    float *grad_support_features = grad_support_features_tensor.data<float>();
+
+    vector_pool_grad_kernel_launcher_stack(
+        grad_new_features, point_cnt_of_grid, grouped_idxs, grad_support_features,
+        N, M, num_c_out, num_c_in, num_total_grids, num_max_sum_points
+    );
+    return 1;
+}
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.cu
new file mode 100644
index 000000000..5c33cf463
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.cu
@@ -0,0 +1,487 @@
+/*
+Vector-pool aggregation based local feature aggregation for point cloud.
+PV-RCNN++: Point-Voxel Feature Set Abstraction With Local Vector Representation for 3D Object Detection
+https://arxiv.org/abs/2102.00463
+
+Written by Shaoshuai Shi
+All Rights Reserved 2020.
+*/
+
+
+#include <float.h>
+#include <math.h>
+#include <stdio.h>
+#include <cuda_runtime_api.h>
+
+#include "vector_pool_gpu.h"
+#include "cuda_utils.h"
+
+
+__global__ void query_three_nn_by_stacked_local_idxs_kernel(
+    const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
+    int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
+    const int *stack_neighbor_idxs, const int *start_len,
+    int M, int num_total_grids){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
+    // new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
+    // new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
+    // stack_neighbor_idxs: (max_length_of_neighbor_idxs)
+    // start_len: (M1 + M2, 2) [start_offset, neighbor_length]
+
+    int grid_idx = blockIdx.y;
+    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
+
+    if (pt_idx >= M || grid_idx >= num_total_grids) return;
+
+    new_xyz += pt_idx * 3;
+    new_xyz_grid_centers += pt_idx * num_total_grids * 3 + grid_idx * 3;
+    new_xyz_grid_idxs += pt_idx * num_total_grids * 3 + grid_idx * 3;
+    new_xyz_grid_dist2 += pt_idx * num_total_grids * 3 + grid_idx * 3;
+
+    start_len += pt_idx * 2;
+    stack_neighbor_idxs += start_len[0];
+    int neighbor_length = start_len[1];
+
+    float center_x = new_xyz_grid_centers[0];
+    float center_y = new_xyz_grid_centers[1];
+    float center_z = new_xyz_grid_centers[2];
+
+    float best1 = FLT_MAX, best2 = FLT_MAX, best3 = FLT_MAX;
+    int besti1 = -1, besti2 = -1, besti3 = -1;
+    for (int k = 0; k < neighbor_length; k++){
+        int cur_neighbor_idx = stack_neighbor_idxs[k];
+
+        float x = support_xyz[cur_neighbor_idx * 3 + 0];
+        float y = support_xyz[cur_neighbor_idx * 3 + 1];
+        float z = support_xyz[cur_neighbor_idx * 3 + 2];
+
+        float d = (center_x - x) * (center_x - x) + (center_y - y) * (center_y - y) + (center_z - z) * (center_z - z);
+
+        if (d < best1) {
+            best3 = best2; besti3 = besti2;
+            best2 = best1; besti2 = besti1;
+            best1 = d; besti1 = cur_neighbor_idx;
+        }
+        else if (d < best2) {
+            best3 = best2; besti3 = besti2;
+            best2 = d; besti2 = cur_neighbor_idx;
+        }
+        else if (d < best3) {
+            best3 = d; besti3 = cur_neighbor_idx;
+        }
+    }
+    if (besti2 == -1){
+        besti2 = besti1; best2 = best1;
+    }
+    if (besti3 == -1){
+        besti3 = besti1; best3 = best1;
+    }
+    new_xyz_grid_dist2[0] = best1;
+    new_xyz_grid_dist2[1] = best2;
+    new_xyz_grid_dist2[2] = best3;
+    new_xyz_grid_idxs[0] = besti1;
+    new_xyz_grid_idxs[1] = besti2;
+    new_xyz_grid_idxs[2] = besti3;
+}
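[Editor's note] Reference sketch (ours) of the three-nearest-neighbor selection the kernel above performs for one grid center over its precomputed candidate list; when fewer than three candidates exist, the nearest one is repeated, matching the besti2/besti3 fallback.

import torch

def three_nn_from_candidates(center, candidates):
    # center: (3,); candidates: (K, 3). Returns (dist2, idx) of the 3 closest candidates.
    d2 = ((candidates - center) ** 2).sum(-1)
    if d2.numel() == 0:
        return torch.full((3,), float('inf')), torch.full((3,), -1, dtype=torch.long)
    dist2, idx = torch.topk(d2, k=min(3, d2.numel()), largest=False)
    while dist2.numel() < 3:                 # pad by repeating the nearest neighbor
        dist2 = torch.cat([dist2, dist2[:1]])
        idx = torch.cat([idx, idx[:1]])
    return dist2, idx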
+int query_three_nn_by_stacked_local_idxs_kernel_launcher_stack(
+    const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
+    int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
+    const int *stack_neighbor_idxs, const int *start_len,
+    int M, int num_total_grids){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_xyz_grid_centers: (M1 + M2 ..., num_total_grids, 3) grids centers of each grid
+    // new_xyz_grid_idxs: (M1 + M2 ..., num_total_grids, 3) three-nn
+    // new_xyz_grid_dist2: (M1 + M2 ..., num_total_grids, 3) square of dist of three-nn
+    // stack_neighbor_idxs: (max_length_of_neighbor_idxs)
+    // start_len: (M1 + M2, 2) [start_offset, neighbor_length]
+
+    cudaError_t err;
+    dim3 blocks(DIVUP(M, THREADS_PER_BLOCK), num_total_grids);  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK);
+
+    query_three_nn_by_stacked_local_idxs_kernel<<<blocks, threads>>>(
+        support_xyz, new_xyz, new_xyz_grid_centers,
+        new_xyz_grid_idxs, new_xyz_grid_dist2, stack_neighbor_idxs, start_len,
+        M, num_total_grids
+    );
+
+    // cudaDeviceSynchronize();  // for using printf in kernel function
+    err = cudaGetLastError();
+    if (cudaSuccess != err) {
+        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
+        exit(-1);
+    }
+    return 0;
+}
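[Editor's note] Sketch (ours) of the bookkeeping used by the neighbor-query kernel below: each center atomically reserves a slice of a shared flat buffer, and writes past the buffer capacity are truncated.

def reserve_slice(cumsum, sample_cnt, max_thresh):
    # cumsum: single-element list emulating the device counter updated with atomicAdd
    start = cumsum[0]
    cumsum[0] += sample_cnt
    if start >= max_thresh:
        return start, 0                                # buffer already full: write nothing
    return start, min(sample_cnt, max_thresh - start)  # clip the tail like the kernel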
+__global__ void query_stacked_local_neighbor_idxs_kernel(
+    const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
+    int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
+    float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // xyz_batch_cnt: (batch_size), [N1, N2, ...]
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+    // stack_neighbor_idxs: (max_length_of_neighbor_idxs)
+    // start_len: (M1 + M2, 2) [start_offset, neighbor_length]
+    // cumsum: (1), max offset of current data in stack_neighbor_idxs
+    // max_neighbour_distance: float
+    // nsample: find all (-1), find limited number(>0)
+    // neighbor_type: 1: ball, others: cube
+
+    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    if (pt_idx >= M) return;
+
+    int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
+    for (int k = 1; k < batch_size; k++){
+        if (pt_idx < pt_cnt) break;
+        pt_cnt += new_xyz_batch_cnt[k];
+        bs_idx = k;
+    }
+
+    int xyz_batch_start_idx = 0;
+    for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
+
+    support_xyz += xyz_batch_start_idx * 3;
+    new_xyz += pt_idx * 3;
+    start_len += pt_idx * 2;
+
+    float new_x = new_xyz[0];
+    float new_y = new_xyz[1];
+    float new_z = new_xyz[2];
+    int n = xyz_batch_cnt[bs_idx];
+
+    float local_x, local_y, local_z;
+    float radius2 = max_neighbour_distance * max_neighbour_distance;
+
+    int temp_idxs[1000];
+
+    int sample_cnt = 0;
+    for (int k = 0; k < n; ++k) {
+        local_x = support_xyz[k * 3 + 0] - new_x;
+        local_y = support_xyz[k * 3 + 1] - new_y;
+        local_z = support_xyz[k * 3 + 2] - new_z;
+
+        if (neighbor_type == 1){
+            // ball
+            if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
+                continue;
+            }
+        }
+        else{
+            // voxel
+            if ((fabs(local_x) > max_neighbour_distance) |
+                (fabs(local_y) > max_neighbour_distance) |
+                (fabs(local_z) > max_neighbour_distance)){
+                continue;
+            }
+        }
+        if (sample_cnt < 1000){
+            temp_idxs[sample_cnt] = k;
+        }
+        else{
+            break;
+        }
+        sample_cnt++;
+        if (nsample > 0 && sample_cnt >= nsample) break;
+    }
+    start_len[0] = atomicAdd(cumsum, sample_cnt);
+    start_len[1] = sample_cnt;
+
+    int max_thresh = avg_length_of_neighbor_idxs * M;
+    if (start_len[0] >= max_thresh) return;
+
+    stack_neighbor_idxs += start_len[0];
+    if (start_len[0] + sample_cnt >= max_thresh) sample_cnt = max_thresh - start_len[0];
+
+    for (int k = 0; k < sample_cnt; k++){
+        stack_neighbor_idxs[k] = temp_idxs[k] + xyz_batch_start_idx;
+    }
+}
+
+
+int query_stacked_local_neighbor_idxs_kernel_launcher_stack(
+    const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
+    int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
+    float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // xyz_batch_cnt: (batch_size), [N1, N2, ...]
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+    // stack_neighbor_idxs: (max_length_of_neighbor_idxs)
+    // start_len: (M1 + M2, 2) [start_offset, neighbor_length]
+    // cumsum: (1), max offset of current data in stack_neighbor_idxs
+    // max_neighbour_distance: float
+    // nsample: find all (-1), find limited number(>0)
+    // neighbor_type: 1: ball, others: cube
+
+    cudaError_t err;
+    dim3 blocks(DIVUP(M, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK);
+
+    query_stacked_local_neighbor_idxs_kernel<<<blocks, threads>>>(
+        support_xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt,
+        stack_neighbor_idxs, start_len, cumsum, avg_length_of_neighbor_idxs,
+        max_neighbour_distance, batch_size, M, nsample, neighbor_type
+    );
+
+    // cudaDeviceSynchronize();  // for using printf in kernel function
+    err = cudaGetLastError();
+    if (cudaSuccess != err) {
+        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
+        exit(-1);
+    }
+    return 0;
+}
+
+
+__global__ void vector_pool_kernel_stack(
+    const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
+    const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
+    int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
+    int batch_size, int M, int num_c_in, int num_c_out,
+    int num_c_each_grid, int num_total_grids, int *point_cnt_of_grid, int *grouped_idxs,
+    int use_xyz, float grid_size_x, float grid_size_y,
+    float grid_size_z, int *cum_sum, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // support_features: (N1 + N2 ..., C)
+    // xyz_batch_cnt: (batch_size), [N1, N2, ...]
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_features: (M1 + M2 ..., C), C = num_total_grids * num_c_each_grid
+    // new_local_xyz: (M1 + M2 ..., 3 * num_total_grids)
+    // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+    // num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
+    // point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
+    // grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
+    // use_xyz: whether to calculate new_local_xyz
+    // neighbor_type: 1: ball, others: cube
+    // pooling_type: 0: avg_pool, 1: random choice
+
+    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    if (pt_idx >= M) return;
+
+    int bs_idx = 0, pt_cnt = new_xyz_batch_cnt[0];
+    for (int k = 1; k < batch_size; k++){
+        if (pt_idx < pt_cnt) break;
+        pt_cnt += new_xyz_batch_cnt[k];
+        bs_idx = k;
+    }
+
+    int xyz_batch_start_idx = 0;
+    for (int k = 0; k < bs_idx; k++) xyz_batch_start_idx += xyz_batch_cnt[k];
+
+    support_xyz += xyz_batch_start_idx * 3;
+    support_features += xyz_batch_start_idx * num_c_in;
+
+    new_xyz += pt_idx * 3;
+    new_features += pt_idx * num_c_out;
+    point_cnt_of_grid += pt_idx * num_total_grids;
+    new_local_xyz += pt_idx * 3 * num_total_grids;
+
+    float new_x = new_xyz[0];
+    float new_y = new_xyz[1];
+    float new_z = new_xyz[2];
+    int n = xyz_batch_cnt[bs_idx], grid_idx_x, grid_idx_y, grid_idx_z, grid_idx;
+    float local_x, local_y, local_z;
+    float radius2 = max_neighbour_distance * max_neighbour_distance;
+
+    int sample_cnt = 0;
+    for (int k = 0; k < n; ++k) {
+        local_x = support_xyz[k * 3 + 0] - new_x;
+        local_y = support_xyz[k * 3 + 1] - new_y;
+        local_z = support_xyz[k * 3 + 2] - new_z;
+
+        if (neighbor_type == 1){
+            // ball
+            if (local_x * local_x + local_y * local_y + local_z * local_z > radius2){
+                continue;
+            }
+        }
+        else{
+            // voxel
+            if ((fabs(local_x) > max_neighbour_distance) |
+                (fabs(local_y) > max_neighbour_distance) |
+                (fabs(local_z) > max_neighbour_distance)){
+                continue;
+            }
+        }
+
+        grid_idx_x = floorf((local_x + max_neighbour_distance) / grid_size_x);
+        grid_idx_y = floorf((local_y + max_neighbour_distance) / grid_size_y);
+        grid_idx_z = floorf((local_z + max_neighbour_distance) / grid_size_z);
+        grid_idx = grid_idx_x * num_grid_y * num_grid_z + grid_idx_y * num_grid_z + grid_idx_z;
+        grid_idx = min(max(grid_idx, 0), num_total_grids - 1);
+
+        if (pooling_type == 0){
+            // avg pooling
+            point_cnt_of_grid[grid_idx]++;
+
+            for (int i = 0; i < num_c_in; i++){
+                new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] += support_features[k * num_c_in + i];
+            }
+            if (use_xyz){
+                new_local_xyz[grid_idx * 3 + 0] += local_x;
+                new_local_xyz[grid_idx * 3 + 1] += local_y;
+                new_local_xyz[grid_idx * 3 + 2] += local_z;
+            }
+
+            int cnt = atomicAdd(cum_sum, 1);
+            if (cnt >= num_max_sum_points) continue;  // keep counting so cum_sum reports the buffer size actually needed
+
+            grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
+            grouped_idxs[cnt * 3 + 1] = pt_idx;
+            grouped_idxs[cnt * 3 + 2] = grid_idx;
+
+            sample_cnt++;
+            if (nsample > 0 && sample_cnt >= nsample) break;
+        }
+        else if (pooling_type == 1){
+            // random choose one within sub-voxel
+            // printf("new_xyz=(%.2f, %.2f, %.2f, ), find neighbor k=%d: support_xyz=(%.2f, %.2f, %.2f), local_xyz=(%.2f, %.2f, %.2f), neighbor=%.2f, grid_idx=%d, point_cnt_of_grid_idx=%d\n",
+            //     new_x, new_y, new_z, k, support_xyz[k * 3 + 0], support_xyz[k * 3 + 1], support_xyz[k * 3 + 2], local_x, local_y, local_z, max_neighbour_distance, grid_idx, point_cnt_of_grid[grid_idx]);
+
+            if (point_cnt_of_grid[grid_idx] == 0){
+                point_cnt_of_grid[grid_idx]++;
+                for (int i = 0; i < num_c_in; i++){
+                    new_features[grid_idx * num_c_each_grid + i % num_c_each_grid] = support_features[k * num_c_in + i];
+                }
+                if (use_xyz){
+                    new_local_xyz[grid_idx * 3 + 0] = local_x;
+                    new_local_xyz[grid_idx * 3 + 1] = local_y;
+                    new_local_xyz[grid_idx * 3 + 2] = local_z;
+                }
+
+                int cnt = atomicAdd(cum_sum, 1);
+                if (cnt >= num_max_sum_points) continue;  // keep counting so cum_sum reports the buffer size actually needed
+
+                grouped_idxs[cnt * 3 + 0] = xyz_batch_start_idx + k;
+                grouped_idxs[cnt * 3 + 1] = pt_idx;
+                grouped_idxs[cnt * 3 + 2] = grid_idx;
+
+                sample_cnt++;
+                if (nsample > 0 && sample_cnt >= nsample || sample_cnt >= num_total_grids) break;
+            }
+        }
+    }
+}
+
+
+int vector_pool_kernel_launcher_stack(
+    const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
+    const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
+    int *point_cnt_of_grid, int *grouped_idxs,
+    int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
+    int batch_size, int N, int M, int num_c_in, int num_c_out, int num_total_grids,
+    int use_xyz, int num_max_sum_points, int nsample, int neighbor_type, int pooling_type){
+    // support_xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+    // support_features: (N1 + N2 ..., C)
+    // xyz_batch_cnt: (batch_size), [N1, N2, ...]
+    // new_xyz: (M1 + M2 ..., 3) centers of the ball query
+    // new_features: (M1 + M2 ..., C)
+    // new_local_xyz: (M1 + M2 ..., 3)
+    // new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+    // num_grid_x, num_grid_y, num_grid_z: number of grids in each local area centered at new_xyz
+    // use_xyz: whether to calculate new_local_xyz
+    // grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
+    // neighbor_type: 1: ball, others: cube
+    // pooling_type: 0: avg_pool, 1: random choice
+
+
+    cudaError_t err;
+    int num_c_each_grid = num_c_out / num_total_grids;
+    float grid_size_x = max_neighbour_distance * 2 / num_grid_x;
+    float grid_size_y = max_neighbour_distance * 2 / num_grid_y;
+    float grid_size_z = max_neighbour_distance * 2 / num_grid_z;
+
+    dim3 blocks(DIVUP(M, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK);
+
+    int cum_sum = 0;
+    int *p_cum_sum;
+    cudaMalloc((void**)&p_cum_sum, sizeof(int));
+    cudaMemcpy(p_cum_sum, &cum_sum, sizeof(int), cudaMemcpyHostToDevice);
+
+    vector_pool_kernel_stack<<<blocks, threads>>>(
+        support_xyz, support_features, xyz_batch_cnt,
+        new_xyz, new_features, new_local_xyz, new_xyz_batch_cnt,
+        num_grid_x, num_grid_y, num_grid_z, max_neighbour_distance,
+        batch_size, M, num_c_in, num_c_out,
+        num_c_each_grid, num_total_grids, point_cnt_of_grid, grouped_idxs,
+        use_xyz, grid_size_x, grid_size_y, grid_size_z, p_cum_sum, num_max_sum_points,
+        nsample, neighbor_type, pooling_type
+    );
+
+    cudaMemcpy(&cum_sum, p_cum_sum, sizeof(int), cudaMemcpyDeviceToHost);
+    cudaFree(p_cum_sum);  // release the temporary device-side counter
+
+    // cudaDeviceSynchronize();  // for using printf in kernel function
+    err = cudaGetLastError();
+    if (cudaSuccess != err) {
+        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
+        exit(-1);
+    }
+    return cum_sum;
+}
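[Editor's note] The launcher above returns cum_sum, the total number of (point, center, grid) pairs the kernel tried to record, which can exceed the grouped_idxs capacity. A typical caller pattern is to grow the buffer and retry when the guess was too small; the sketch below is ours and only illustrates that contract (run_op is a hypothetical wrapper around the CUDA launcher, not a function in this patch).

import torch

def run_with_grown_buffer(run_op, num_max_sum_points):
    while True:
        grouped_idxs = torch.zeros(num_max_sum_points, 3, dtype=torch.int32, device='cuda')
        cum_sum = run_op(grouped_idxs, num_max_sum_points)  # returns the kernel's counter
        if cum_sum <= num_max_sum_points:
            return grouped_idxs[:cum_sum]                   # all pairs were recorded
        num_max_sum_points = cum_sum                        # retry with the exact size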
+__global__ void vector_pool_grad_kernel_stack(const float *grad_new_features,
+    const int *point_cnt_of_grid, const int *grouped_idxs,
+    float *grad_support_features, int N, int M, int num_c_out, int num_c_in,
+    int num_c_each_grid, int num_total_grids, int num_max_sum_points){
+    // grad_new_features: (M1 + M2 ..., C_out)
+    // point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
+    // grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
+    // grad_support_features: (N1 + N2 ..., C_in)
+
+    int channel_idx = blockIdx.y;
+    int index = blockIdx.x * blockDim.x + threadIdx.x;
+
+    if (index >= num_max_sum_points || channel_idx >= num_c_in) return;
+
+    int idx_of_support_xyz = grouped_idxs[index * 3 + 0];
+    int idx_of_new_xyz = grouped_idxs[index * 3 + 1];
+    int idx_of_grid_idx = grouped_idxs[index * 3 + 2];
+
+    int num_total_pts = point_cnt_of_grid[idx_of_new_xyz * num_total_grids + idx_of_grid_idx];
+    grad_support_features += idx_of_support_xyz * num_c_in + channel_idx;
+
+    grad_new_features += idx_of_new_xyz * num_c_out + idx_of_grid_idx * num_c_each_grid;
+    int channel_idx_of_cin = channel_idx % num_c_each_grid;
+    float cur_grad = 1 / fmaxf(float(num_total_pts), 1.0);
+    atomicAdd(grad_support_features, grad_new_features[channel_idx_of_cin] * cur_grad);
+}
+
+
+void vector_pool_grad_kernel_launcher_stack(
+    const float *grad_new_features, const int *point_cnt_of_grid, const int *grouped_idxs,
+    float *grad_support_features, int N, int M, int num_c_out, int num_c_in, int num_total_grids,
+    int num_max_sum_points){
+    // grad_new_features: (M1 + M2 ..., C_out)
+    // point_cnt_of_grid: (M1 + M2 ..., num_total_grids)
+    // grouped_idxs: (num_max_sum_points, 3) [idx of support_xyz, idx of new_xyz, idx of grid_idx in new_xyz]
+    // grad_support_features: (N1 + N2 ..., C_in)
+    int num_c_each_grid = num_c_out / num_total_grids;
+
+    cudaError_t err;
+
+    dim3 blocks(DIVUP(num_max_sum_points, THREADS_PER_BLOCK), num_c_in);  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK);
+
+    vector_pool_grad_kernel_stack<<<blocks, threads>>>(
+        grad_new_features, point_cnt_of_grid, grouped_idxs, grad_support_features,
+        N, M, num_c_out, num_c_in, num_c_each_grid, num_total_grids, num_max_sum_points
+    );
+    // cudaDeviceSynchronize();  // for using printf in kernel function
+    err = cudaGetLastError();
+    if (cudaSuccess != err) {
+        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
+        exit(-1);
+    }
+}
\ No newline at end of file
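[Editor's note] Reference sketch (ours) of the backward rule implemented by vector_pool_grad_kernel_stack above: every recorded (point, center, grid) pair routes the grid's gradient back to its source point, scaled by 1/count for average pooling; forward folded input channel i into slot i % num_c_each_grid, so the gradient is unfolded the same way.

import torch

def vector_pool_backward_reference(grad_new_features, point_cnt_of_grid, grouped_idxs,
                                   N, num_c_in, num_c_each_grid):
    grad_support = torch.zeros(N, num_c_in)
    for src, dst, grid in grouped_idxs.tolist():
        cnt = max(int(point_cnt_of_grid[dst, grid]), 1)
        g = grad_new_features[dst, grid * num_c_each_grid:(grid + 1) * num_c_each_grid] / cnt
        reps = (num_c_in + num_c_each_grid - 1) // num_c_each_grid
        grad_support[src] += g.repeat(reps)[:num_c_in]  # entry i gets g[i % num_c_each_grid]
    return grad_support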
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.h
new file mode 100644
index 000000000..febfb8553
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/vector_pool_gpu.h
@@ -0,0 +1,71 @@
+/*
+Vector-pool aggregation based local feature aggregation for point cloud.
+PV-RCNN++: Point-Voxel Feature Set Abstraction With Local Vector Representation for 3D Object Detection
+https://arxiv.org/abs/2102.00463
+
+Written by Shaoshuai Shi
+All Rights Reserved 2020.
+*/
+
+
+#ifndef _STACK_VECTOR_POOL_GPU_H
+#define _STACK_VECTOR_POOL_GPU_H
+
+#include <torch/serialize/tensor.h>
+#include <vector>
+#include <cuda.h>
+#include <cuda_runtime_api.h>
+
+
+int query_stacked_local_neighbor_idxs_kernel_launcher_stack(
+    const float *support_xyz, const int *xyz_batch_cnt, const float *new_xyz, const int *new_xyz_batch_cnt,
+    int *stack_neighbor_idxs, int *start_len, int *cumsum, int avg_length_of_neighbor_idxs,
+    float max_neighbour_distance, int batch_size, int M, int nsample, int neighbor_type);
+
+int query_stacked_local_neighbor_idxs_wrapper_stack(at::Tensor support_xyz_tensor, at::Tensor xyz_batch_cnt_tensor,
+    at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
+    at::Tensor stack_neighbor_idxs_tensor, at::Tensor start_len_tensor, at::Tensor cumsum_tensor,
+    int avg_length_of_neighbor_idxs, float max_neighbour_distance, int nsample, int neighbor_type);
+
+
+int query_three_nn_by_stacked_local_idxs_kernel_launcher_stack(
+    const float *support_xyz, const float *new_xyz, const float *new_xyz_grid_centers,
+    int *new_xyz_grid_idxs, float *new_xyz_grid_dist2,
+    const int *stack_neighbor_idxs, const int *start_len,
+    int M, int num_total_grids);
+
+int query_three_nn_by_stacked_local_idxs_wrapper_stack(at::Tensor support_xyz_tensor,
+    at::Tensor new_xyz_tensor, at::Tensor new_xyz_grid_centers_tensor,
+    at::Tensor new_xyz_grid_idxs_tensor, at::Tensor new_xyz_grid_dist2_tensor,
+    at::Tensor stack_neighbor_idxs_tensor, at::Tensor start_len_tensor,
+    int M, int num_total_grids);
+
+
+int vector_pool_wrapper_stack(at::Tensor support_xyz_tensor, at::Tensor xyz_batch_cnt_tensor,
+    at::Tensor support_features_tensor, at::Tensor new_xyz_tensor, at::Tensor new_xyz_batch_cnt_tensor,
+    at::Tensor new_features_tensor, at::Tensor new_local_xyz,
+    at::Tensor point_cnt_of_grid_tensor, at::Tensor grouped_idxs_tensor,
+    int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance, int use_xyz,
+    int num_max_sum_points, int nsample, int neighbor_type, int pooling_type);
+
+
+int vector_pool_kernel_launcher_stack(
+    const float *support_xyz, const float *support_features, const int *xyz_batch_cnt,
+    const float *new_xyz, float *new_features, float *new_local_xyz, const int *new_xyz_batch_cnt,
+    int *point_cnt_of_grid, int *grouped_idxs,
+    int num_grid_x, int num_grid_y, int num_grid_z, float max_neighbour_distance,
+    int batch_size, int N, int M, int num_c_in, int num_c_out, int num_total_grids, int use_xyz,
+    int num_max_sum_points, int nsample, int neighbor_type, int pooling_type);
+
+
+int vector_pool_grad_wrapper_stack(at::Tensor grad_new_features_tensor,
+    at::Tensor point_cnt_of_grid_tensor, at::Tensor grouped_idxs_tensor,
+    at::Tensor grad_support_features_tensor);
+
+
+void vector_pool_grad_kernel_launcher_stack(
+    const float *grad_new_features, const int *point_cnt_of_grid, const int *grouped_idxs,
+    float *grad_support_features, int N, int M, int num_c_out, int num_c_in, int num_total_grids,
+    int num_max_sum_points);
+
+#endif
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp
new file mode 100644
index 000000000..1bea75ed5
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query.cpp
@@ -0,0 +1,41 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <cuda.h>
+#include <cuda_runtime_api.h>
+#include <vector>
+#include <torch/extension.h>
+#include "voxel_query_gpu.h"
+
+#define CHECK_CUDA(x) do { \
+    if (!x.type().is_cuda()) { \
+        fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \
+        exit(-1); \
+    } \
+} while (0)
+#define CHECK_CONTIGUOUS(x) do { \
+    if (!x.is_contiguous()) { \
+        fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \
+        exit(-1); \
+    } \
+} while (0)
+#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
+
+
+int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius,
+    int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor,
+    at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor) {
+    CHECK_INPUT(new_coords_tensor);
+    CHECK_INPUT(point_indices_tensor);
+    CHECK_INPUT(new_xyz_tensor);
+    CHECK_INPUT(xyz_tensor);
+
+    const float *new_xyz = new_xyz_tensor.data<float>();
+    const float *xyz = xyz_tensor.data<float>();
+    const int *new_coords = new_coords_tensor.data<int>();
+    const int *point_indices = point_indices_tensor.data<int>();
+    int *idx = idx_tensor.data<int>();
+
+    voxel_query_kernel_launcher_stack(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
+    return 1;
+}
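[Editor's note] Sketch (ours) of what the voxel query in the next file computes for one keypoint: scan the (2*z_range+1) x (2*y_range+1) x (2*x_range+1) neighborhood of its voxel in a dense voxel-to-point index grid, keeping up to nsample points within `radius` of the keypoint.

import torch

def voxel_query_reference(new_xyz, new_coord, point_indices, xyz, ranges, radius, nsample):
    (zr, yr, xr), out = ranges, []
    b, z, y, x = new_coord.tolist()
    Z, Y, X = point_indices.shape[1:]
    for dz in range(-zr, zr + 1):
        for dy in range(-yr, yr + 1):
            for dx in range(-xr, xr + 1):
                zz, yy, xx = z + dz, y + dy, x + dx
                if not (0 <= zz < Z and 0 <= yy < Y and 0 <= xx < X):
                    continue
                n = int(point_indices[b, zz, yy, xx])
                if n < 0:
                    continue                      # empty voxel
                if ((xyz[n] - new_xyz) ** 2).sum() <= radius ** 2:
                    out.append(n)
                if len(out) >= nsample:
                    return out
    return out                                    # may be empty, like idx[0] == -1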
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.cu b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.cu
new file mode 100644
index 000000000..a4953662f
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.cu
@@ -0,0 +1,113 @@
+#include <math.h>
+#include <stdio.h>
+#include <curand.h>
+#include <curand_kernel.h>
+
+#include "voxel_query_gpu.h"
+#include "cuda_utils.h"
+
+
+__global__ void voxel_query_kernel_stack(int M, int R1, int R2, int R3, int nsample,
+    float radius, int z_range, int y_range, int x_range, const float *new_xyz,
+    const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
+    // :param new_coords: (M1 + M2 ..., 4) centers of the ball query
+    // :param point_indices: (B, Z, Y, X)
+    // output:
+    //     idx: (M1 + M2, nsample)
+    int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
+    if (pt_idx >= M) return;
+
+    new_xyz += pt_idx * 3;
+    new_coords += pt_idx * 4;
+    idx += pt_idx * nsample;
+
+    curandState state;
+    curand_init(pt_idx, 0, 0, &state);
+
+    float radius2 = radius * radius;
+    float new_x = new_xyz[0];
+    float new_y = new_xyz[1];
+    float new_z = new_xyz[2];
+
+    int batch_idx = new_coords[0];
+    int new_coords_z = new_coords[1];
+    int new_coords_y = new_coords[2];
+    int new_coords_x = new_coords[3];
+
+    int cnt = 0;
+    int cnt2 = 0;
+    // for (int dz = -1*z_range; dz <= z_range; ++dz) {
+    for (int dz = -1*z_range; dz <= z_range; ++dz) {
+        int z_coord = new_coords_z + dz;
+        if (z_coord < 0 || z_coord >= R1) continue;
+
+        for (int dy = -1*y_range; dy <= y_range; ++dy) {
+            int y_coord = new_coords_y + dy;
+            if (y_coord < 0 || y_coord >= R2) continue;
+
+            for (int dx = -1*x_range; dx <= x_range; ++dx) {
+                int x_coord = new_coords_x + dx;
+                if (x_coord < 0 || x_coord >= R3) continue;
+
+                int index = batch_idx * R1 * R2 * R3 + \
+                    z_coord * R2 * R3 + \
+                    y_coord * R3 + \
+                    x_coord;
+                int neighbor_idx = point_indices[index];
+                if (neighbor_idx < 0) continue;
+
+                float x_per = xyz[neighbor_idx*3 + 0];
+                float y_per = xyz[neighbor_idx*3 + 1];
+                float z_per = xyz[neighbor_idx*3 + 2];
+
+                float dist2 = (x_per - new_x) * (x_per - new_x) + (y_per - new_y) * (y_per - new_y) + (z_per - new_z) * (z_per - new_z);
+
+                if (dist2 > radius2) continue;
+
+                ++cnt2;
+
+                if (cnt < nsample) {
+                    if (cnt == 0) {
+                        for (int l = 0; l < nsample; ++l) {
+                            idx[l] = neighbor_idx;
+                        }
+                    }
+                    idx[cnt] = neighbor_idx;
+                    ++cnt;
+                }
+                // else {
+                //     float rnd = curand_uniform(&state);
+                //     if (rnd < (float(nsample) / cnt2)) {
+                //         int insertidx = ceilf(curand_uniform(&state) * nsample) - 1;
+                //         idx[insertidx] = neighbor_idx;
+                //     }
+                // }
+            }
+        }
+    }
+    if (cnt == 0) idx[0] = -1;
+}
+
+
+void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
+    float radius, int z_range, int y_range, int x_range, const float *new_xyz,
+    const float *xyz, const int *new_coords, const int *point_indices, int *idx) {
+    // :param new_coords: (M1 + M2 ..., 4) centers of the voxel query
+    // :param point_indices: (B, Z, Y, X)
+    // output:
+    //     idx: (M1 + M2, nsample)
+
+    cudaError_t err;
+
+    dim3 blocks(DIVUP(M, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK);
+
+    voxel_query_kernel_stack<<<blocks, threads>>>(M, R1, R2, R3, nsample, radius, z_range, y_range, x_range, new_xyz, xyz, new_coords, point_indices, idx);
+    // cudaDeviceSynchronize();  // for using printf in kernel function
+
+    err = cudaGetLastError();
+    if (cudaSuccess != err) {
+        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
+        exit(-1);
+    }
+}
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h
new file mode 100644
index 000000000..eddba654d
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/src/voxel_query_gpu.h
@@ -0,0 +1,19 @@
+#ifndef _STACK_VOXEL_QUERY_GPU_H
+#define _STACK_VOXEL_QUERY_GPU_H
+
+#include <torch/serialize/tensor.h>
+#include <vector>
+#include <cuda.h>
+#include <cuda_runtime_api.h>
+
+int voxel_query_wrapper_stack(int M, int R1, int R2, int R3, int nsample, float radius,
+    int z_range, int y_range, int x_range, at::Tensor new_xyz_tensor, at::Tensor xyz_tensor,
+    at::Tensor new_coords_tensor, at::Tensor point_indices_tensor, at::Tensor idx_tensor);
+
+
+void voxel_query_kernel_launcher_stack(int M, int R1, int R2, int R3, int nsample,
+    float radius, int z_range, int y_range, int x_range, const float *new_xyz,
+    const float *xyz, const int *new_coords, const int *point_indices, int *idx);
+
+
+#endif
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py
new file mode 100644
index 000000000..033b5f1d1
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_pool_modules.py
@@ -0,0 +1,131 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from . import voxel_query_utils
+from typing import List
+
+
+class NeighborVoxelSAModuleMSG(nn.Module):
+
+    def __init__(self, *, query_ranges: List[List[int]], radii: List[float],
+                 nsamples: List[int], mlps: List[List[int]], use_xyz: bool = True, pool_method='max_pool'):
+        """
+        Args:
+            query_ranges: list of int, list of neighbor ranges to group with
+            radii: list of float, radii of the balls used to filter grouped points
+            nsamples: list of int, number of samples in each ball query
+            mlps: list of list of int, spec of the pointnet before the global pooling for each scale
+            use_xyz:
+            pool_method: max_pool / avg_pool
+        """
+        super().__init__()
+
+        assert len(query_ranges) == len(nsamples) == len(mlps)
+
+        self.groupers = nn.ModuleList()
+        self.mlps_in = nn.ModuleList()
+        self.mlps_pos = nn.ModuleList()
+        self.mlps_out = nn.ModuleList()
+        for i in range(len(query_ranges)):
+            max_range = query_ranges[i]
+            nsample = nsamples[i]
+            radius = radii[i]
+            self.groupers.append(voxel_query_utils.VoxelQueryAndGrouping(max_range, radius, nsample))
+            mlp_spec = mlps[i]
+
+            cur_mlp_in = nn.Sequential(
+                nn.Conv1d(mlp_spec[0], mlp_spec[1], kernel_size=1, bias=False),
+                nn.BatchNorm1d(mlp_spec[1])
+            )
+
+            cur_mlp_pos = nn.Sequential(
+                nn.Conv2d(3, mlp_spec[1], kernel_size=1, bias=False),
+                nn.BatchNorm2d(mlp_spec[1])
+            )
+
+            cur_mlp_out = nn.Sequential(
+                nn.Conv1d(mlp_spec[1], mlp_spec[2], kernel_size=1, bias=False),
+                nn.BatchNorm1d(mlp_spec[2]),
+                nn.ReLU()
+            )
+
+            self.mlps_in.append(cur_mlp_in)
+            self.mlps_pos.append(cur_mlp_pos)
+            self.mlps_out.append(cur_mlp_out)
+
+        self.relu = nn.ReLU()
+        self.pool_method = pool_method
+
+        self.init_weights()
+
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):
+                nn.init.kaiming_normal_(m.weight)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
+                nn.init.constant_(m.weight, 1.0)
+                nn.init.constant_(m.bias, 0)
+    def forward(self, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, \
+                new_coords, features, voxel2point_indices):
+        """
+        :param xyz: (N1 + N2 ..., 3) tensor of the xyz coordinates of the features
+        :param xyz_batch_cnt: (batch_size), [N1, N2, ...]
+        :param new_xyz: (M1 + M2 ..., 3)
+        :param new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+        :param new_coords: (M1 + M2 ..., 4) [batch_idx, x, y, z] voxel coordinates of the query centers
+        :param features: (N1 + N2 ..., C) tensor of the descriptors of the features
+        :param voxel2point_indices: (B, Z, Y, X) tensor of point indices of voxels
+        :return:
+            new_xyz: (M1 + M2 ..., 3) tensor of the new features' xyz
+            new_features: (M1 + M2 ..., \sum_k(mlps[k][-1])) tensor of the new_features descriptors
+        """
+        # change the order to [batch_idx, z, y, x]
+        new_coords = new_coords[:, [0, 3, 2, 1]].contiguous()
+        new_features_list = []
+        for k in range(len(self.groupers)):
+            # features_in: (1, C, M1+M2)
+            features_in = features.permute(1, 0).unsqueeze(0)
+            features_in = self.mlps_in[k](features_in)
+            # features_in: (1, M1+M2, C)
+            features_in = features_in.permute(0, 2, 1).contiguous()
+            # features_in: (M1+M2, C)
+            features_in = features_in.view(-1, features_in.shape[-1])
+            # grouped_features: (M1+M2, C, nsample)
+            # grouped_xyz: (M1+M2, 3, nsample)
+            grouped_features, grouped_xyz, empty_ball_mask = self.groupers[k](
+                new_coords, xyz, xyz_batch_cnt, new_xyz, new_xyz_batch_cnt, features_in, voxel2point_indices
+            )
+            grouped_features[empty_ball_mask] = 0
+
+            # grouped_features: (1, C, M1+M2, nsample)
+            grouped_features = grouped_features.permute(1, 0, 2).unsqueeze(dim=0)
+            # grouped_xyz: (M1+M2, 3, nsample)
+            grouped_xyz = grouped_xyz - new_xyz.unsqueeze(-1)
+            grouped_xyz[empty_ball_mask] = 0
+            # grouped_xyz: (1, 3, M1+M2, nsample)
+            grouped_xyz = grouped_xyz.permute(1, 0, 2).unsqueeze(0)
+            # position_features: (1, C, M1+M2, nsample)
+            position_features = self.mlps_pos[k](grouped_xyz)
+            new_features = grouped_features + position_features
+            new_features = self.relu(new_features)
+
+            if self.pool_method == 'max_pool':
+                new_features = F.max_pool2d(
+                    new_features, kernel_size=[1, new_features.size(3)]
+                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
+            elif self.pool_method == 'avg_pool':
+                new_features = F.avg_pool2d(
+                    new_features, kernel_size=[1, new_features.size(3)]
+                ).squeeze(dim=-1)  # (1, C, M1 + M2 ...)
+            else:
+                raise NotImplementedError
+
+            new_features = self.mlps_out[k](new_features)
+            new_features = new_features.squeeze(dim=0).permute(1, 0)  # (M1 + M2 ..., C)
+            new_features_list.append(new_features)
+
+        # (M1 + M2 ..., C)
+        new_features = torch.cat(new_features_list, dim=1)
+        return new_features
+
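[Editor's note] Hypothetical instantiation (ours) showing the expected shapes of the module's constructor arguments; the values are illustrative only, not a recommended configuration.

sa_layer = NeighborVoxelSAModuleMSG(
    query_ranges=[[4, 4, 4]],   # voxel search range per scale: (z, y, x)
    radii=[0.4],                # metric radius used to filter gathered points
    nsamples=[16],              # max points grouped per center
    mlps=[[32, 32, 64]],        # in-channels, hidden, out-channels per scale
    pool_method='max_pool',
)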
diff --git a/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py
new file mode 100644
index 000000000..b22da2de1
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/pointnet2/pointnet2_stack/voxel_query_utils.py
@@ -0,0 +1,100 @@
+import torch
+from torch.autograd import Variable
+from torch.autograd import Function
+import torch.nn as nn
+from typing import List
+
+from . import pointnet2_stack_cuda as pointnet2
+from . import pointnet2_utils
+
+class VoxelQuery(Function):
+
+    @staticmethod
+    def forward(ctx, max_range: int, radius: float, nsample: int, xyz: torch.Tensor, \
+                new_xyz: torch.Tensor, new_coords: torch.Tensor, point_indices: torch.Tensor):
+        """
+        Args:
+            ctx:
+            max_range: int, max range of voxels to be grouped
+            radius: float, radius of the balls used to filter gathered points
+            nsample: int, maximum number of features in the balls
+            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+            new_xyz: (M1 + M2 ..., 3) centers of the ball query
+            new_coords: (M1 + M2, 4), [batch_id, z, y, x] coordinates of keypoints
+            point_indices: (batch_size, Z, Y, X) 4-D tensor recording the point indices of voxels
+        Returns:
+            idx: (M1 + M2, nsample) tensor with the indices of the features that form the query balls
+        """
+        assert new_xyz.is_contiguous()
+        assert xyz.is_contiguous()
+        assert new_coords.is_contiguous()
+        assert point_indices.is_contiguous()
+
+        M = new_coords.shape[0]
+        B, Z, Y, X = point_indices.shape
+        idx = torch.cuda.IntTensor(M, nsample).zero_()
+
+        z_range, y_range, x_range = max_range
+        pointnet2.voxel_query_wrapper(M, Z, Y, X, nsample, radius, z_range, y_range, x_range, \
+            new_xyz, xyz, new_coords, point_indices, idx)
+
+        empty_ball_mask = (idx[:, 0] == -1)
+        idx[empty_ball_mask] = 0
+
+        return idx, empty_ball_mask
+
+    @staticmethod
+    def backward(ctx, a=None):
+        return None, None, None, None
+
+voxel_query = VoxelQuery.apply
+
+
+class VoxelQueryAndGrouping(nn.Module):
+    def __init__(self, max_range: int, radius: float, nsample: int):
+        """
+        Args:
+            max_range: int, max range of voxels to be grouped
+            radius: float, radius of ball
+            nsample: int, maximum number of features to gather in the ball
+        """
+        super().__init__()
+        self.max_range, self.radius, self.nsample = max_range, radius, nsample
+
+    def forward(self, new_coords: torch.Tensor, xyz: torch.Tensor, xyz_batch_cnt: torch.Tensor,
+                new_xyz: torch.Tensor, new_xyz_batch_cnt: torch.Tensor,
+                features: torch.Tensor, voxel2point_indices: torch.Tensor):
+        """
+        Args:
+            new_coords: (M1 + M2 ..., 4) voxel coordinates [batch_idx, z, y, x] of the query centers
+            xyz: (N1 + N2 ..., 3) xyz coordinates of the features
+            xyz_batch_cnt: (batch_size), [N1, N2, ...]
+            new_xyz: (M1 + M2 ..., 3) centers of the ball query
+            new_xyz_batch_cnt: (batch_size), [M1, M2, ...]
+            features: (N1 + N2 ..., C) tensor of features to group
+            voxel2point_indices: (B, Z, Y, X) tensor of point indices of voxels
+
+        Returns:
+            new_features: (M1 + M2, C, nsample) tensor
+        """
+        assert xyz.shape[0] == xyz_batch_cnt.sum(), 'xyz: %s, xyz_batch_cnt: %s' % (str(xyz.shape), str(xyz_batch_cnt))
+        assert new_coords.shape[0] == new_xyz_batch_cnt.sum(), \
+            'new_coords: %s, new_xyz_batch_cnt: %s' % (str(new_coords.shape), str(new_xyz_batch_cnt))
+        batch_size = xyz_batch_cnt.shape[0]
+
+        # idx: (M1 + M2 ..., nsample), empty_ball_mask: (M1 + M2 ...)
+        idx1, empty_ball_mask1 = voxel_query(self.max_range, self.radius, self.nsample, xyz, new_xyz, new_coords, voxel2point_indices)
+
+        # convert the stacked global point indices into per-scene local indices
+        idx1 = idx1.view(batch_size, -1, self.nsample)
+        count = 0
+        for bs_idx in range(batch_size):
+            idx1[bs_idx] -= count
+            count += xyz_batch_cnt[bs_idx]
+        idx1 = idx1.view(-1, self.nsample)
+        idx1[empty_ball_mask1] = 0
+
+        idx = idx1
+        empty_ball_mask = empty_ball_mask1
+
+        grouped_xyz = pointnet2_utils.grouping_operation(xyz, xyz_batch_cnt, idx, new_xyz_batch_cnt)
+        # grouped_features: (M1 + M2, C, nsample)
+        grouped_features = pointnet2_utils.grouping_operation(features, xyz_batch_cnt, idx, new_xyz_batch_cnt)
+
+        return grouped_features, grouped_xyz, empty_ball_mask
diff --git a/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/__init__.py b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
new file mode 100644
index 000000000..d8ca924d3
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py
@@ -0,0 +1,111 @@
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+
+from ...utils import common_utils
+from . import roiaware_pool3d_cuda
+
+
+def points_in_boxes_cpu(points, boxes):
+    """
+    Args:
+        points: (num_points, 3)
+        boxes: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center; boxes must not overlap with each other
+    Returns:
+        point_indices: (N, num_points)
+    """
+    assert boxes.shape[1] == 7
+    assert points.shape[1] == 3
+    points, is_numpy = common_utils.check_numpy_to_torch(points)
+    boxes, is_numpy = common_utils.check_numpy_to_torch(boxes)
+
+    point_indices = points.new_zeros((boxes.shape[0], points.shape[0]), dtype=torch.int)
+    roiaware_pool3d_cuda.points_in_boxes_cpu(boxes.float().contiguous(), points.float().contiguous(), point_indices)
+
+    return point_indices.numpy() if is_numpy else point_indices
+
+
+def points_in_boxes_gpu(points, boxes):
+    """
+    :param points: (B, M, 3)
+    :param boxes: (B, T, 7), num_valid_boxes <= T
+    :return box_idxs_of_pts: (B, M), default background = -1
+    """
+    assert boxes.shape[0] == points.shape[0]
+    assert boxes.shape[2] == 7 and points.shape[2] == 3
+    batch_size, num_points, _ = points.shape
+
+    box_idxs_of_pts = points.new_zeros((batch_size, num_points), dtype=torch.int).fill_(-1)
+    roiaware_pool3d_cuda.points_in_boxes_gpu(boxes.contiguous(), points.contiguous(), box_idxs_of_pts)
+
+    return box_idxs_of_pts
+
+
+class RoIAwarePool3d(nn.Module):
+    def __init__(self, out_size, max_pts_each_voxel=128):
+        super().__init__()
+        self.out_size = out_size
+        self.max_pts_each_voxel = max_pts_each_voxel
+
+    def forward(self, rois, pts, pts_feature, pool_method='max'):
+        assert pool_method in ['max', 'avg']
+        return RoIAwarePool3dFunction.apply(rois, pts, pts_feature, self.out_size, self.max_pts_each_voxel, pool_method)
+
+
+class RoIAwarePool3dFunction(Function):
+    @staticmethod
+    def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):
+        """
+        Args:
+            ctx:
+            rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
+            pts: (npoints, 3)
+            pts_feature: (npoints, C)
+            out_size: int or tuple, like 7 or (7, 7, 7)
+            max_pts_each_voxel: int, max number of points stored in each pooled voxel
+            pool_method: 'max' or 'avg'
+
+        Returns:
+            pooled_features: (N, out_x, out_y, out_z, C)
+        """
+        assert rois.shape[1] == 7 and pts.shape[1] == 3
+        if isinstance(out_size, int):
+            out_x = out_y = out_z = out_size
+        else:
+            assert len(out_size) == 3
+            for k in range(3):
+                assert isinstance(out_size[k], int)
+            out_x, out_y, out_z = out_size
+
+        num_rois = rois.shape[0]
+        num_channels = pts_feature.shape[-1]
+        num_pts = pts.shape[0]
+
+        pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))
+        argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
+        pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)
+
+        pool_method_map = {'max': 0, 'avg': 1}
+        pool_method = pool_method_map[pool_method]
+        roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)
+
+        ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)
+        return pooled_features
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        """
+        :param grad_out: (N, out_x, out_y, out_z, C)
+        :return:
+            grad_in: (npoints, C)
+        """
+        pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward
+
+        grad_in = grad_out.new_zeros((num_pts, num_channels))
+        roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)
+
+        return None, None, grad_in, None, None, None
+
+
+if __name__ == '__main__':
+    pass
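[Editor's note] Illustrative usage (ours) of the two helpers above; boxes follow OpenPCDet's (x, y, z, dx, dy, dz, heading) convention with (x, y, z) at the box center, and the values are made up.

import torch

boxes = torch.tensor([[10.0, 2.0, -1.0, 4.0, 1.8, 1.6, 0.3]])    # one box, CPU
points = torch.tensor([[10.5, 2.1, -1.0], [50.0, 0.0, 0.0]])
mask = points_in_boxes_cpu(points, boxes)        # (num_boxes, num_points) 0/1 mask
# The GPU variant works on padded batches and returns a per-point box index (-1 = background):
# box_idx = points_in_boxes_gpu(points.cuda()[None], boxes.cuda()[None])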
diff --git a/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d.cpp b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d.cpp
new file mode 100644
index 000000000..00edfef81
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d.cpp
@@ -0,0 +1,177 @@
+/*
+RoI-aware point cloud feature pooling
+Reference paper: https://arxiv.org/abs/1907.03670
+Written by Shaoshuai Shi
+All Rights Reserved 2019-2020.
+*/
+
+
+#include <torch/serialize/tensor.h>
+#include <torch/extension.h>
+#include <assert.h>
+
+
+//#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
+//#define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ")
+//#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
+
+
+void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, int max_pts_each_voxel,
+    int out_x, int out_y, int out_z, const float *rois, const float *pts, const float *pts_feature,
+    int *argmax, int *pts_idx_of_voxels, float *pooled_features, int pool_method);
+
+void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, int out_z, int channels, int max_pts_each_voxel,
+    const int *pts_idx_of_voxels, const int *argmax, const float *grad_out, float *grad_in, int pool_method);
+
+void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes,
+    const float *pts, int *box_idx_of_points);
+
+int roiaware_pool3d_gpu(at::Tensor rois, at::Tensor pts, at::Tensor pts_feature, at::Tensor argmax,
+    at::Tensor pts_idx_of_voxels, at::Tensor pooled_features, int pool_method){
+    // params rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
+    // params pts: (npoints, 3) [x, y, z]
+    // params pts_feature: (npoints, C)
+    // params argmax: (N, out_x, out_y, out_z, C)
+    // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
+    // params pooled_features: (N, out_x, out_y, out_z, C)
+    // params pool_method: 0: max_pool 1: avg_pool
+
+//    CHECK_INPUT(rois);
+//    CHECK_INPUT(pts);
+//    CHECK_INPUT(pts_feature);
+//    CHECK_INPUT(argmax);
+//    CHECK_INPUT(pts_idx_of_voxels);
+//    CHECK_INPUT(pooled_features);
+
+    int boxes_num = rois.size(0);
+    int pts_num = pts.size(0);
+    int channels = pts_feature.size(1);
+    int max_pts_each_voxel = pts_idx_of_voxels.size(4);  // index 0 is the counter
+    int out_x = pts_idx_of_voxels.size(1);
+    int out_y = pts_idx_of_voxels.size(2);
+    int out_z = pts_idx_of_voxels.size(3);
+    assert ((out_x < 256) && (out_y < 256) && (out_z < 256));  // we encode index with 8bit
+
+    const float *rois_data = rois.data<float>();
+    const float *pts_data = pts.data<float>();
+    const float *pts_feature_data = pts_feature.data<float>();
+    int *argmax_data = argmax.data<int>();
+    int *pts_idx_of_voxels_data = pts_idx_of_voxels.data<int>();
+    float *pooled_features_data = pooled_features.data<float>();
+
+    roiaware_pool3d_launcher(boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z,
+        rois_data, pts_data, pts_feature_data, argmax_data, pts_idx_of_voxels_data, pooled_features_data, pool_method);
+
+    return 1;
+}
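[Editor's note] Sketch (ours) of the voxel-index packing the pooling kernels rely on: each of x_idx / y_idx / z_idx must fit in 8 bits, which is why the wrapper above asserts out_* < 256.

def encode_voxel_idx(x_idx, y_idx, z_idx):
    return (x_idx << 16) + (y_idx << 8) + z_idx

def decode_voxel_idx(code):
    return (code >> 16) & 0xFF, (code >> 8) & 0xFF, code & 0xFF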
+int roiaware_pool3d_gpu_backward(at::Tensor pts_idx_of_voxels, at::Tensor argmax, at::Tensor grad_out, at::Tensor grad_in, int pool_method){
+    // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel)
+    // params argmax: (N, out_x, out_y, out_z, C)
+    // params grad_out: (N, out_x, out_y, out_z, C)
+    // params grad_in: (npoints, C), return value
+    // params pool_method: 0: max_pool 1: avg_pool
+
+//    CHECK_INPUT(pts_idx_of_voxels);
+//    CHECK_INPUT(argmax);
+//    CHECK_INPUT(grad_out);
+//    CHECK_INPUT(grad_in);
+
+    int boxes_num = pts_idx_of_voxels.size(0);
+    int out_x = pts_idx_of_voxels.size(1);
+    int out_y = pts_idx_of_voxels.size(2);
+    int out_z = pts_idx_of_voxels.size(3);
+    int max_pts_each_voxel = pts_idx_of_voxels.size(4);  // index 0 is the counter
+    int channels = grad_out.size(4);
+
+    const int *pts_idx_of_voxels_data = pts_idx_of_voxels.data<int>();
+    const int *argmax_data = argmax.data<int>();
+    const float *grad_out_data = grad_out.data<float>();
+    float *grad_in_data = grad_in.data<float>();
+
+    roiaware_pool3d_backward_launcher(boxes_num, out_x, out_y, out_z, channels, max_pts_each_voxel,
+        pts_idx_of_voxels_data, argmax_data, grad_out_data, grad_in_data, pool_method);
+
+    return 1;
+}
+
+int points_in_boxes_gpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, at::Tensor box_idx_of_points_tensor){
+    // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center
+    // params pts: (B, npoints, 3) [x, y, z]
+    // params boxes_idx_of_points: (B, npoints), default -1
+
+//    CHECK_INPUT(boxes_tensor);
+//    CHECK_INPUT(pts_tensor);
+//    CHECK_INPUT(box_idx_of_points_tensor);
+
+    int batch_size = boxes_tensor.size(0);
+    int boxes_num = boxes_tensor.size(1);
+    int pts_num = pts_tensor.size(1);
+
+    const float *boxes = boxes_tensor.data<float>();
+    const float *pts = pts_tensor.data<float>();
+    int *box_idx_of_points = box_idx_of_points_tensor.data<int>();
+
+    points_in_boxes_launcher(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points);
+
+    return 1;
+}
+
+
+inline void lidar_to_local_coords_cpu(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){
+    float cosa = cos(-rot_angle), sina = sin(-rot_angle);
+    local_x = shift_x * cosa + shift_y * (-sina);
+    local_y = shift_x * sina + shift_y * cosa;
+}
+
+
+inline int check_pt_in_box3d_cpu(const float *pt, const float *box3d, float &local_x, float &local_y){
+    // param pt: (x, y, z)
+    // param box3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
+    const float MARGIN = 1e-2;
+    float x = pt[0], y = pt[1], z = pt[2];
+    float cx = box3d[0], cy = box3d[1], cz = box3d[2];
+    float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6];
+
+    if (fabsf(z - cz) > dz / 2.0) return 0;
+    lidar_to_local_coords_cpu(x - cx, y - cy, rz, local_x, local_y);
+    float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN);
+    return in_flag;
+}
+
+
+int points_in_boxes_cpu(at::Tensor boxes_tensor, at::Tensor pts_tensor, at::Tensor pts_indices_tensor){
+    // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center; boxes must not overlap with each other
+    // params pts: (num_points, 3) [x, y, z]
+    // params pts_indices: (N, num_points)
+
+//    CHECK_CONTIGUOUS(boxes_tensor);
+//    CHECK_CONTIGUOUS(pts_tensor);
+//    CHECK_CONTIGUOUS(pts_indices_tensor);
+
+    int boxes_num = boxes_tensor.size(0);
+    int pts_num = pts_tensor.size(0);
+
+    const float *boxes = boxes_tensor.data<float>();
+    const float *pts = pts_tensor.data<float>();
+    int *pts_indices = pts_indices_tensor.data<int>();
+
+    float local_x = 0, local_y = 0;
+    for (int i = 0; i < boxes_num; i++){
+        for (int j = 0; j < pts_num; j++){
+            int cur_in_flag = check_pt_in_box3d_cpu(pts + j * 3, boxes + i * 7, local_x, local_y);
+            pts_indices[i * pts_num + j] = cur_in_flag;
+        }
+    }
+
+    return 1;
+}
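[Editor's note] Python rendering (ours) of check_pt_in_box3d_cpu above: rotate the point into the box frame, then do an axis-aligned extent test with a small margin.

import math

def point_in_box(pt, box, margin=1e-2):
    x, y, z = pt
    cx, cy, cz, dx, dy, dz, rz = box
    if abs(z - cz) > dz / 2.0:
        return False
    cosa, sina = math.cos(-rz), math.sin(-rz)       # rotate by -heading into the box frame
    local_x = (x - cx) * cosa - (y - cy) * sina
    local_y = (x - cx) * sina + (y - cy) * cosa
    return abs(local_x) < dx / 2.0 + margin and abs(local_y) < dy / 2.0 + margin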
m.def("points_in_boxes_gpu", &points_in_boxes_gpu, "points_in_boxes_gpu forward (CUDA)"); + m.def("points_in_boxes_cpu", &points_in_boxes_cpu, "points_in_boxes_cpu forward (CUDA)"); +} diff --git a/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu new file mode 100644 index 000000000..5b52937f9 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/roiaware_pool3d/src/roiaware_pool3d_kernel.cu @@ -0,0 +1,359 @@ +/* +RoI-aware point cloud feature pooling +Written by Shaoshuai Shi +All Rights Reserved 2019-2020. +*/ + + +#include +#include + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) +// #define DEBUG + + +__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){ + float cosa = cos(-rot_angle), sina = sin(-rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + + +__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y){ + // param pt: (x, y, z) + // param box3d: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + + const float MARGIN = 1e-5; + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN); + return in_flag; +} + + +__global__ void generate_pts_mask_for_box3d(int boxes_num, int pts_num, int out_x, int out_y, int out_z, + const float *rois, const float *pts, int *pts_mask){ + // params rois: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (npoints, 3) [x, y, z] + // params pts_mask: (N, npoints): -1 means point doesnot in this box, otherwise: encode (x_idxs, y_idxs, z_idxs) by binary bit + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + if (pt_idx >= pts_num || box_idx >= boxes_num) return; + + pts += pt_idx * 3; + rois += box_idx * 7; + pts_mask += box_idx * pts_num + pt_idx; + + float local_x = 0, local_y = 0; + int cur_in_flag = check_pt_in_box3d(pts, rois, local_x, local_y); + + pts_mask[0] = -1; + if (cur_in_flag > 0){ + float local_z = pts[2] - rois[2]; + float dx = rois[3], dy = rois[4], dz = rois[5]; + + float x_res = dx / out_x; + float y_res = dy / out_y; + float z_res = dz / out_z; + + unsigned int x_idx = int((local_x + dx / 2) / x_res); + unsigned int y_idx = int((local_y + dy / 2) / y_res); + unsigned int z_idx = int((local_z + dz / 2) / z_res); + + x_idx = min(max(x_idx, 0), out_x - 1); + y_idx = min(max(y_idx, 0), out_y - 1); + z_idx = min(max(z_idx, 0), out_z - 1); + + unsigned int idx_encoding = (x_idx << 16) + (y_idx << 8) + z_idx; + pts_mask[0] = idx_encoding; + } +} + + +__global__ void collect_inside_pts_for_box3d(int boxes_num, int pts_num, int max_pts_each_voxel, + int out_x, int out_y, int out_z, const int *pts_mask, int *pts_idx_of_voxels){ + // params pts_mask: (N, npoints) 0 or 1 + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + + int box_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (box_idx >= boxes_num) return; + + int max_num_pts = max_pts_each_voxel - 1; // index 0 is the counter + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * 
max_pts_each_voxel; + + for (int k = 0; k < pts_num; k++){ + if (pts_mask[box_idx * pts_num + k] != -1){ + unsigned int idx_encoding = pts_mask[box_idx * pts_num + k]; + unsigned int x_idx = (idx_encoding >> 16) & 0xFF; + unsigned int y_idx = (idx_encoding >> 8) & 0xFF; + unsigned int z_idx = idx_encoding & 0xFF; + unsigned int base_offset = x_idx * out_y * out_z * max_pts_each_voxel + y_idx * out_z * max_pts_each_voxel + z_idx * max_pts_each_voxel; + unsigned int cnt = pts_idx_of_voxels[base_offset]; + if (cnt < max_num_pts){ + pts_idx_of_voxels[base_offset + cnt + 1] = k; + pts_idx_of_voxels[base_offset]++; + } +#ifdef DEBUG + printf("collect: pts_%d, idx(%d, %d, %d), idx_encoding=%x\n", + k, x_idx, y_idx, z_idx, idx_encoding); +#endif + + } + } +} + + +__global__ void roiaware_maxpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, + int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features, int *argmax){ + // params pts_feature: (npoints, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), index 0 is the counter + // params pooled_features: (N, out_x, out_y, out_z, C) + // params argmax: (N, out_x, out_y, out_z, C) + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels|| x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; + +#ifdef DEBUG + printf("src pts_idx_of_voxels: (%p, ), argmax: %p\n", pts_idx_of_voxels, argmax); +#endif + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel; + pooled_features += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; + argmax += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; + + int argmax_idx = -1; + float max_val = -1e50; + + int total_pts = pts_idx_of_voxels[0]; + + for (int k = 1; k <= total_pts; k++){ + if (pts_feature[pts_idx_of_voxels[k] * channels + channel_idx] > max_val){ + max_val = pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; + argmax_idx = pts_idx_of_voxels[k]; + } + } + + if (argmax_idx != -1){ + pooled_features[0] = max_val; + } + argmax[0] = argmax_idx; + +#ifdef DEBUG + printf("channel_%d idx(%d, %d, %d), argmax_idx=(%d, %.3f), total=%d, after pts_idx: %p, argmax: (%p, %d)\n", + channel_idx, x_idx, y_idx, z_idx, argmax_idx, max_val, total_pts, pts_idx_of_voxels, argmax, argmax_idx); +#endif +} + + +__global__ void roiaware_avgpool3d(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, + int out_y, int out_z, const float *pts_feature, const int *pts_idx_of_voxels, float *pooled_features){ + // params pts_feature: (npoints, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel), index 0 is the counter + // params pooled_features: (N, out_x, out_y, out_z, C) + // params argmax: (N, out_x, out_y, out_z, C) + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num 
|| channel_idx >= channels|| x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel; + pooled_features += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; + + float sum_val = 0; + int total_pts = pts_idx_of_voxels[0]; + + for (int k = 1; k <= total_pts; k++){ + sum_val += pts_feature[pts_idx_of_voxels[k] * channels + channel_idx]; + } + + if (total_pts > 0){ + pooled_features[0] = sum_val / total_pts; + } +} + + +void roiaware_pool3d_launcher(int boxes_num, int pts_num, int channels, int max_pts_each_voxel, int out_x, int out_y, int out_z, + const float *rois, const float *pts, const float *pts_feature, int *argmax, int *pts_idx_of_voxels, float *pooled_features, int pool_method){ + // params rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (npoints, 3) [x, y, z] + // params pts_feature: (npoints, C) + // params argmax: (N, out_x, out_y, out_z, C) + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params pooled_features: (N, out_x, out_y, out_z, C) + // params pool_method: 0: max_pool 1: avg_pool + + int *pts_mask = NULL; + cudaMalloc(&pts_mask, boxes_num * pts_num * sizeof(int)); // (N, M) + cudaMemset(pts_mask, -1, boxes_num * pts_num * sizeof(int)); + + dim3 blocks_mask(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num); + dim3 threads(THREADS_PER_BLOCK); + generate_pts_mask_for_box3d<<>>(boxes_num, pts_num, out_x, out_y, out_z, rois, pts, pts_mask); + + // TODO: Merge the collect and pool functions, SS + + dim3 blocks_collect(DIVUP(boxes_num, THREADS_PER_BLOCK)); + collect_inside_pts_for_box3d<<>>(boxes_num, pts_num, max_pts_each_voxel, + out_x, out_y, out_z, pts_mask, pts_idx_of_voxels); + + dim3 blocks_pool(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num); + if (pool_method == 0){ + roiaware_maxpool3d<<>>(boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, + pts_feature, pts_idx_of_voxels, pooled_features, argmax); + } + else if (pool_method == 1){ + roiaware_avgpool3d<<>>(boxes_num, pts_num, channels, max_pts_each_voxel, out_x, out_y, out_z, + pts_feature, pts_idx_of_voxels, pooled_features); + } + + + cudaFree(pts_mask); + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} + + +__global__ void roiaware_maxpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z, + const int *argmax, const float *grad_out, float *grad_in){ + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels|| x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + argmax += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; + grad_out += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; + + if (argmax[0] == -1) return; + + atomicAdd(grad_in + argmax[0] * channels + channel_idx, 
grad_out[0] * 1); +} + + +__global__ void roiaware_avgpool3d_backward(int boxes_num, int channels, int out_x, int out_y, int out_z, + int max_pts_each_voxel, const int *pts_idx_of_voxels, const float *grad_out, float *grad_in){ + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + + int box_idx = blockIdx.z; + int channel_idx = blockIdx.y; + int voxel_idx_flat = blockIdx.x * blockDim.x + threadIdx.x; + + int x_idx = voxel_idx_flat / (out_y * out_z); + int y_idx = (voxel_idx_flat - x_idx * (out_y * out_z)) / out_z; + int z_idx = voxel_idx_flat % out_z; + if (box_idx >= boxes_num || channel_idx >= channels|| x_idx >= out_x || y_idx >= out_y || z_idx >= out_z) return; + + int offset_base = x_idx * out_y * out_z + y_idx * out_z + z_idx; + pts_idx_of_voxels += box_idx * out_x * out_y * out_z * max_pts_each_voxel + offset_base * max_pts_each_voxel; + grad_out += box_idx * out_x * out_y * out_z * channels + offset_base * channels + channel_idx; + + + int total_pts = pts_idx_of_voxels[0]; + float cur_grad = 1 / fmaxf(float(total_pts), 1.0); + for (int k = 1; k <= total_pts; k++){ + atomicAdd(grad_in + pts_idx_of_voxels[k] * channels + channel_idx, grad_out[0] * cur_grad); + } +} + + +void roiaware_pool3d_backward_launcher(int boxes_num, int out_x, int out_y, int out_z, int channels, int max_pts_each_voxel, + const int *pts_idx_of_voxels, const int *argmax, const float *grad_out, float *grad_in, int pool_method){ + // params pts_idx_of_voxels: (N, out_x, out_y, out_z, max_pts_each_voxel) + // params argmax: (N, out_x, out_y, out_z, C) + // params grad_out: (N, out_x, out_y, out_z, C) + // params grad_in: (npoints, C), return value + // params pool_method: 0: max_pool, 1: avg_pool + + dim3 blocks(DIVUP(out_x * out_y * out_z, THREADS_PER_BLOCK), channels, boxes_num); + dim3 threads(THREADS_PER_BLOCK); + if (pool_method == 0){ + roiaware_maxpool3d_backward<<>>( + boxes_num, channels, out_x, out_y, out_z, argmax, grad_out, grad_in + ); + } + else if (pool_method == 1){ + roiaware_avgpool3d_backward<<>>( + boxes_num, channels, out_x, out_y, out_z, max_pts_each_voxel, pts_idx_of_voxels, grad_out, grad_in + ); + } + +} + + +__global__ void points_in_boxes_kernel(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] in LiDAR coordinate + // params boxes_idx_of_points: (B, npoints), default -1 + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= batch_size || pt_idx >= pts_num) return; + + boxes += bs_idx * boxes_num * 7; + pts += bs_idx * pts_num * 3 + pt_idx * 3; + box_idx_of_points += bs_idx * pts_num + pt_idx; + + float local_x = 0, local_y = 0; + int cur_in_flag = 0; + for (int k = 0; k < boxes_num; k++){ + cur_in_flag = check_pt_in_box3d(pts, boxes + k * 7, local_x, local_y); + if (cur_in_flag){ + box_idx_of_points[0] = k; + break; + } + } +} + + +void points_in_boxes_launcher(int batch_size, int boxes_num, int pts_num, const float *boxes, + const float *pts, int *box_idx_of_points){ + // params boxes: (B, N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + // params pts: (B, npoints, 3) [x, y, z] + // params boxes_idx_of_points: (B, npoints), default -1 + cudaError_t err; + + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), batch_size); + 
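+    // Launch geometry (descriptive note): grid.x covers the points in chunks of
+    // THREADS_PER_BLOCK and grid.y indexes the batch, so each thread tests one
+    // point. The kernel above scans the boxes in order and breaks on the first
+    // hit, so a point inside overlapping boxes resolves to the lowest box index.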
dim3 threads(THREADS_PER_BLOCK); + points_in_boxes_kernel<<>>(batch_size, boxes_num, pts_num, boxes, pts, box_idx_of_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} diff --git a/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/__init__.py b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py new file mode 100644 index 000000000..1e13396cc --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/roipoint_pool3d_utils.py @@ -0,0 +1,67 @@ +import torch +import torch.nn as nn +from torch.autograd import Function + +from ...utils import box_utils +from . import roipoint_pool3d_cuda + + +class RoIPointPool3d(nn.Module): + def __init__(self, num_sampled_points=512, pool_extra_width=1.0): + super().__init__() + self.num_sampled_points = num_sampled_points + self.pool_extra_width = pool_extra_width + + def forward(self, points, point_features, boxes3d): + """ + Args: + points: (B, N, 3) + point_features: (B, N, C) + boxes3d: (B, M, 7), [x, y, z, dx, dy, dz, heading] + + Returns: + pooled_features: (B, M, 512, 3 + C) + pooled_empty_flag: (B, M) + """ + return RoIPointPool3dFunction.apply( + points, point_features, boxes3d, self.pool_extra_width, self.num_sampled_points + ) + + +class RoIPointPool3dFunction(Function): + @staticmethod + def forward(ctx, points, point_features, boxes3d, pool_extra_width, num_sampled_points=512): + """ + Args: + ctx: + points: (B, N, 3) + point_features: (B, N, C) + boxes3d: (B, num_boxes, 7), [x, y, z, dx, dy, dz, heading] + pool_extra_width: + num_sampled_points: + + Returns: + pooled_features: (B, num_boxes, 512, 3 + C) + pooled_empty_flag: (B, num_boxes) + """ + assert points.shape.__len__() == 3 and points.shape[2] == 3 + batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[1], point_features.shape[2] + pooled_boxes3d = box_utils.enlarge_box3d(boxes3d.view(-1, 7), pool_extra_width).view(batch_size, -1, 7) + + pooled_features = point_features.new_zeros((batch_size, boxes_num, num_sampled_points, 3 + feature_len)) + pooled_empty_flag = point_features.new_zeros((batch_size, boxes_num)).int() + + roipoint_pool3d_cuda.forward( + points.contiguous(), pooled_boxes3d.contiguous(), + point_features.contiguous(), pooled_features, pooled_empty_flag + ) + + return pooled_features, pooled_empty_flag + + @staticmethod + def backward(ctx, grad_out): + raise NotImplementedError + + +if __name__ == '__main__': + pass diff --git a/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp new file mode 100644 index 000000000..e0f58ab00 --- /dev/null +++ b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d.cpp @@ -0,0 +1,60 @@ +#include +#include + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +void 
roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num,
+    const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag);
+
+
+int roipool3d_gpu(at::Tensor xyz, at::Tensor boxes3d, at::Tensor pts_feature, at::Tensor pooled_features, at::Tensor pooled_empty_flag){
+    // params xyz: (B, N, 3)
+    // params boxes3d: (B, M, 7)
+    // params pts_feature: (B, N, C)
+    // params pooled_features: (B, M, 512, 3+C)
+    // params pooled_empty_flag: (B, M)
+    CHECK_INPUT(xyz);
+    CHECK_INPUT(boxes3d);
+    CHECK_INPUT(pts_feature);
+    CHECK_INPUT(pooled_features);
+    CHECK_INPUT(pooled_empty_flag);
+
+    int batch_size = xyz.size(0);
+    int pts_num = xyz.size(1);
+    int boxes_num = boxes3d.size(1);
+    int feature_in_len = pts_feature.size(2);
+    int sampled_pts_num = pooled_features.size(2);
+
+    const float *xyz_data = xyz.data<float>();
+    const float *boxes3d_data = boxes3d.data<float>();
+    const float *pts_feature_data = pts_feature.data<float>();
+    float *pooled_features_data = pooled_features.data<float>();
+    int *pooled_empty_flag_data = pooled_empty_flag.data<int>();
+
+    roipool3dLauncher(batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num,
+        xyz_data, boxes3d_data, pts_feature_data, pooled_features_data, pooled_empty_flag_data);
+
+    return 1;
+}
+
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def("forward", &roipool3d_gpu, "roipool3d forward (CUDA)");
+}
+
diff --git a/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu
new file mode 100644
index 000000000..3fa034c12
--- /dev/null
+++ b/toolbox/openpcdet/pcdet/ops/roipoint_pool3d/src/roipoint_pool3d_kernel.cu
@@ -0,0 +1,165 @@
+/*
+Point cloud feature pooling
+Written by Shaoshuai Shi
+All Rights Reserved 2018.
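+
+Forward pipeline implemented below: assign_pts_to_box3d marks which points fall
+inside each box, get_pooled_idx gathers up to sampled_pts_num point indices per
+box (duplicating indices when a box holds fewer points and setting
+pooled_empty_flag when it holds none), and roipool3d_forward copies the selected
+xyz coordinates and feature channels into the pooled output.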
+*/ + +#include +#include + +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) +// #define DEBUG + + +__device__ inline void lidar_to_local_coords(float shift_x, float shift_y, float rot_angle, float &local_x, float &local_y){ + float cosa = cos(-rot_angle), sina = sin(-rot_angle); + local_x = shift_x * cosa + shift_y * (-sina); + local_y = shift_x * sina + shift_y * cosa; +} + + +__device__ inline int check_pt_in_box3d(const float *pt, const float *box3d, float &local_x, float &local_y){ + // param pt: (x, y, z) + // param box3d: [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center + + const float MARGIN = 1e-5; + float x = pt[0], y = pt[1], z = pt[2]; + float cx = box3d[0], cy = box3d[1], cz = box3d[2]; + float dx = box3d[3], dy = box3d[4], dz = box3d[5], rz = box3d[6]; + + if (fabsf(z - cz) > dz / 2.0) return 0; + lidar_to_local_coords(x - cx, y - cy, rz, local_x, local_y); + float in_flag = (fabs(local_x) < dx / 2.0 + MARGIN) & (fabs(local_y) < dy / 2.0 + MARGIN); + return in_flag; +} + + +__global__ void assign_pts_to_box3d(int batch_size, int pts_num, int boxes_num, const float *xyz, const float *boxes3d, int *pts_assign){ + // params xyz: (B, N, 3) + // params boxes3d: (B, M, 7) + // params pts_assign: (B, N, M): idx of the corresponding box3d, -1 means background points + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + int bs_idx = blockIdx.z; + + if (pt_idx >= pts_num || box_idx >= boxes_num || bs_idx >= batch_size){ + return; + } + int assign_idx = bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx; + pts_assign[assign_idx] = 0; + + int box_offset = bs_idx * boxes_num * 7 + box_idx * 7; + int pt_offset = bs_idx * pts_num * 3 + pt_idx * 3; + + + float local_x = 0, local_y = 0; + int cur_in_flag = check_pt_in_box3d(xyz + pt_offset, boxes3d + box_offset, local_x, local_y); + pts_assign[assign_idx] = cur_in_flag; + // printf("bs=%d, pt=%d, in=%d\n", bs_idx, pt_idx, pts_assign[bs_idx * pts_num + pt_idx]); +} + + +__global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num, int sampled_pts_num, + const int *pts_assign, int *pts_idx, int *pooled_empty_flag){ + // params xyz: (B, N, 3) + // params pts_feature: (B, N, C) + // params pts_assign: (B, N) + // params pts_idx: (B, M, 512) + // params pooled_empty_flag: (B, M) + + int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (boxes_idx >= boxes_num){ + return; + } + + int bs_idx = blockIdx.y; + + int cnt = 0; + for (int k = 0; k < pts_num; k++){ + if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num + boxes_idx]){ + if (cnt < sampled_pts_num){ + pts_idx[bs_idx * boxes_num * sampled_pts_num + boxes_idx * sampled_pts_num + cnt] = k; + cnt++; + } + else break; + } + } + + if (cnt == 0){ + pooled_empty_flag[bs_idx * boxes_num + boxes_idx] = 1; + } + else if (cnt < sampled_pts_num){ + // duplicate same points for sampling + for (int k = cnt; k < sampled_pts_num; k++){ + int duplicate_idx = k % cnt; + int base_offset = bs_idx * boxes_num * sampled_pts_num + boxes_idx * sampled_pts_num; + pts_idx[base_offset + k] = pts_idx[base_offset + duplicate_idx]; + } + } +} + + +__global__ void roipool3d_forward(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num, + const float *xyz, const int *pts_idx, const float *pts_feature, + float *pooled_features, int *pooled_empty_flag){ + // params xyz: (B, N, 3) + // params pts_idx: (B, M, 512) + // params pts_feature: (B, N, C) + // params pooled_features: (B, M, 512, 3+C) 
+ // params pooled_empty_flag: (B, M) + + int sample_pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + int box_idx = blockIdx.y; + int bs_idx = blockIdx.z; + + if (sample_pt_idx >= sampled_pts_num || box_idx >= boxes_num || bs_idx >= batch_size){ + return; + } + + if (pooled_empty_flag[bs_idx * boxes_num + box_idx]){ + return; + } + + int temp_idx = bs_idx * boxes_num * sampled_pts_num + box_idx * sampled_pts_num + sample_pt_idx; + int src_pt_idx = pts_idx[temp_idx]; + int dst_feature_offset = temp_idx * (3 + feature_in_len); + + for (int j = 0; j < 3; j++) + pooled_features[dst_feature_offset + j] = xyz[bs_idx * pts_num * 3 + src_pt_idx * 3 + j]; + + int src_feature_offset = bs_idx * pts_num * feature_in_len + src_pt_idx * feature_in_len; + for (int j = 0; j < feature_in_len; j++) + pooled_features[dst_feature_offset + 3 + j] = pts_feature[src_feature_offset + j]; +} + + +void roipool3dLauncher(int batch_size, int pts_num, int boxes_num, int feature_in_len, int sampled_pts_num, + const float *xyz, const float *boxes3d, const float *pts_feature, float *pooled_features, int *pooled_empty_flag){ + + // printf("batch_size=%d, pts_num=%d, boxes_num=%d\n", batch_size, pts_num, boxes_num); + int *pts_assign = NULL; + cudaMalloc(&pts_assign, batch_size * pts_num * boxes_num * sizeof(int)); // (batch_size, N, M) + // cudaMemset(&pts_assign, -1, batch_size * pts_num * boxes_num * sizeof(int)); + + dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num, batch_size); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + assign_pts_to_box3d<<>>(batch_size, pts_num, boxes_num, xyz, boxes3d, pts_assign); + + int *pts_idx = NULL; + cudaMalloc(&pts_idx, batch_size * boxes_num * sampled_pts_num * sizeof(int)); // (batch_size, M, sampled_pts_num) + + dim3 blocks2(DIVUP(boxes_num, THREADS_PER_BLOCK), batch_size); // blockIdx.x(col), blockIdx.y(row) + get_pooled_idx<<>>(batch_size, pts_num, boxes_num, sampled_pts_num, pts_assign, pts_idx, pooled_empty_flag); + + dim3 blocks_pool(DIVUP(sampled_pts_num, THREADS_PER_BLOCK), boxes_num, batch_size); + roipool3d_forward<<>>(batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num, + xyz, pts_idx, pts_feature, pooled_features, pooled_empty_flag); + + cudaFree(pts_assign); + cudaFree(pts_idx); + +#ifdef DEBUG + cudaDeviceSynchronize(); // for using printf in kernel function +#endif +} \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/utils/__init__.py b/toolbox/openpcdet/pcdet/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/toolbox/openpcdet/pcdet/utils/box_coder_utils.py b/toolbox/openpcdet/pcdet/utils/box_coder_utils.py new file mode 100644 index 000000000..50ad143d6 --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/box_coder_utils.py @@ -0,0 +1,222 @@ +import numpy as np +import torch + + +class ResidualCoder(object): + def __init__(self, code_size=7, encode_angle_by_sincos=False, **kwargs): + super().__init__() + self.code_size = code_size + self.encode_angle_by_sincos = encode_angle_by_sincos + if self.encode_angle_by_sincos: + self.code_size += 1 + + def encode_torch(self, boxes, anchors): + """ + Args: + boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + anchors: (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...] 
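+
+            Encoding sketch (matching the code below): with diagonal = sqrt(dxa^2 + dya^2),
+            xt = (xg - xa) / diagonal, yt = (yg - ya) / diagonal, zt = (zg - za) / dza,
+            dxt = log(dxg / dxa) (likewise dyt, dzt), and the heading residual is
+            rg - ra, or the pair (cos(rg) - cos(ra), sin(rg) - sin(ra)) when
+            encode_angle_by_sincos is set.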
+ + Returns: + + """ + anchors[:, 3:6] = torch.clamp_min(anchors[:, 3:6], min=1e-5) + boxes[:, 3:6] = torch.clamp_min(boxes[:, 3:6], min=1e-5) + + xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1) + xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(boxes, 1, dim=-1) + + diagonal = torch.sqrt(dxa ** 2 + dya ** 2) + xt = (xg - xa) / diagonal + yt = (yg - ya) / diagonal + zt = (zg - za) / dza + dxt = torch.log(dxg / dxa) + dyt = torch.log(dyg / dya) + dzt = torch.log(dzg / dza) + if self.encode_angle_by_sincos: + rt_cos = torch.cos(rg) - torch.cos(ra) + rt_sin = torch.sin(rg) - torch.sin(ra) + rts = [rt_cos, rt_sin] + else: + rts = [rg - ra] + + cts = [g - a for g, a in zip(cgs, cas)] + return torch.cat([xt, yt, zt, dxt, dyt, dzt, *rts, *cts], dim=-1) + + def decode_torch(self, box_encodings, anchors): + """ + Args: + box_encodings: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading or *[cos, sin], ...] + anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + + Returns: + + """ + xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1) + if not self.encode_angle_by_sincos: + xt, yt, zt, dxt, dyt, dzt, rt, *cts = torch.split(box_encodings, 1, dim=-1) + else: + xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1) + + diagonal = torch.sqrt(dxa ** 2 + dya ** 2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * dza + za + + dxg = torch.exp(dxt) * dxa + dyg = torch.exp(dyt) * dya + dzg = torch.exp(dzt) * dza + + if self.encode_angle_by_sincos: + rg_cos = cost + torch.cos(ra) + rg_sin = sint + torch.sin(ra) + rg = torch.atan2(rg_sin, rg_cos) + else: + rg = rt + ra + + cgs = [t + a for t, a in zip(cts, cas)] + return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1) + + +class PreviousResidualDecoder(object): + def __init__(self, code_size=7, **kwargs): + super().__init__() + self.code_size = code_size + + @staticmethod + def decode_torch(box_encodings, anchors): + """ + Args: + box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values + anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + + Returns: + + """ + xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1) + xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1) + + diagonal = torch.sqrt(dxa ** 2 + dya ** 2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * dza + za + + dxg = torch.exp(lt) * dxa + dyg = torch.exp(wt) * dya + dzg = torch.exp(ht) * dza + rg = rt + ra + + cgs = [t + a for t, a in zip(cts, cas)] + return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1) + + +class PreviousResidualRoIDecoder(object): + def __init__(self, code_size=7, **kwargs): + super().__init__() + self.code_size = code_size + + @staticmethod + def decode_torch(box_encodings, anchors): + """ + Args: + box_encodings: (B, N, 7 + ?) x, y, z, w, l, h, r, custom values + anchors: (B, N, 7 + C) or (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] 
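+
+            Note (grounded in the code below): unlike PreviousResidualDecoder above,
+            the heading here decodes as rg = ra - rt, i.e. the residual is subtracted
+            from the anchor heading rather than added.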
+ + Returns: + + """ + xa, ya, za, dxa, dya, dza, ra, *cas = torch.split(anchors, 1, dim=-1) + xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1) + + diagonal = torch.sqrt(dxa ** 2 + dya ** 2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * dza + za + + dxg = torch.exp(lt) * dxa + dyg = torch.exp(wt) * dya + dzg = torch.exp(ht) * dza + rg = ra - rt + + cgs = [t + a for t, a in zip(cts, cas)] + return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1) + + +class PointResidualCoder(object): + def __init__(self, code_size=8, use_mean_size=True, **kwargs): + super().__init__() + self.code_size = code_size + self.use_mean_size = use_mean_size + if self.use_mean_size: + self.mean_size = torch.from_numpy(np.array(kwargs['mean_size'])).float().cuda() + assert self.mean_size.min() > 0 + + def encode_torch(self, gt_boxes, points, gt_classes=None): + """ + Args: + gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...] + points: (N, 3) [x, y, z] + gt_classes: (N) [1, num_classes] + Returns: + box_coding: (N, 8 + C) + """ + gt_boxes[:, 3:6] = torch.clamp_min(gt_boxes[:, 3:6], min=1e-5) + + xg, yg, zg, dxg, dyg, dzg, rg, *cgs = torch.split(gt_boxes, 1, dim=-1) + xa, ya, za = torch.split(points, 1, dim=-1) + + if self.use_mean_size: + assert gt_classes.max() <= self.mean_size.shape[0] + point_anchor_size = self.mean_size[gt_classes - 1] + dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1) + diagonal = torch.sqrt(dxa ** 2 + dya ** 2) + xt = (xg - xa) / diagonal + yt = (yg - ya) / diagonal + zt = (zg - za) / dza + dxt = torch.log(dxg / dxa) + dyt = torch.log(dyg / dya) + dzt = torch.log(dzg / dza) + else: + xt = (xg - xa) + yt = (yg - ya) + zt = (zg - za) + dxt = torch.log(dxg) + dyt = torch.log(dyg) + dzt = torch.log(dzg) + + cts = [g for g in cgs] + return torch.cat([xt, yt, zt, dxt, dyt, dzt, torch.cos(rg), torch.sin(rg), *cts], dim=-1) + + def decode_torch(self, box_encodings, points, pred_classes=None): + """ + Args: + box_encodings: (N, 8 + C) [x, y, z, dx, dy, dz, cos, sin, ...] + points: [x, y, z] + pred_classes: (N) [1, num_classes] + Returns: + + """ + xt, yt, zt, dxt, dyt, dzt, cost, sint, *cts = torch.split(box_encodings, 1, dim=-1) + xa, ya, za = torch.split(points, 1, dim=-1) + + if self.use_mean_size: + assert pred_classes.max() <= self.mean_size.shape[0] + point_anchor_size = self.mean_size[pred_classes - 1] + dxa, dya, dza = torch.split(point_anchor_size, 1, dim=-1) + diagonal = torch.sqrt(dxa ** 2 + dya ** 2) + xg = xt * diagonal + xa + yg = yt * diagonal + ya + zg = zt * dza + za + + dxg = torch.exp(dxt) * dxa + dyg = torch.exp(dyt) * dya + dzg = torch.exp(dzt) * dza + else: + xg = xt + xa + yg = yt + ya + zg = zt + za + dxg, dyg, dzg = torch.split(torch.exp(box_encodings[..., 3:6]), 1, dim=-1) + + rg = torch.atan2(sint, cost) + + cgs = [t for t in cts] + return torch.cat([xg, yg, zg, dxg, dyg, dzg, rg, *cgs], dim=-1) diff --git a/toolbox/openpcdet/pcdet/utils/box_utils.py b/toolbox/openpcdet/pcdet/utils/box_utils.py new file mode 100644 index 000000000..0e87de6f6 --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/box_utils.py @@ -0,0 +1,440 @@ +import numpy as np +import scipy +import torch +import copy +from scipy.spatial import Delaunay + +from ..ops.roiaware_pool3d import roiaware_pool3d_utils +from . 
import common_utils
+
+
+def in_hull(p, hull):
+    """
+    :param p: (N, K) test points
+    :param hull: (M, K) M corners of a box
+    :return (N) bool
+    """
+    try:
+        if not isinstance(hull, Delaunay):
+            hull = Delaunay(hull)
+        flag = hull.find_simplex(p) >= 0
+    except scipy.spatial.qhull.QhullError:
+        print('Warning: not a hull %s' % str(hull))
+        flag = np.zeros(p.shape[0], dtype=bool)
+
+    return flag
+
+
+def boxes_to_corners_3d(boxes3d):
+    """
+        7 -------- 4
+       /|         /|
+      6 -------- 5 .
+      | |        | |
+      . 3 -------- 0
+      |/         |/
+      2 -------- 1
+    Args:
+        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
+
+    Returns:
+        corners3d: (N, 8, 3)
+    """
+    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
+
+    template = boxes3d.new_tensor((
+        [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
+        [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
+    )) / 2
+
+    corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :]
+    corners3d = common_utils.rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3)
+    corners3d += boxes3d[:, None, 0:3]
+
+    return corners3d.numpy() if is_numpy else corners3d
+
+
+def corners_rect_to_camera(corners):
+    """
+        7 -------- 4
+       /|         /|
+      6 -------- 5 .
+      | |        | |
+      . 3 -------- 0
+      |/         |/
+      2 -------- 1
+    Args:
+        corners: (8, 3) [x0, y0, z0, ...], (x, y, z) is the point coordinate in image rect
+
+    Returns:
+        boxes_rect: (7,) [x, y, z, l, h, w, r] in rect camera coords
+    """
+    height_group = [(0, 4), (1, 5), (2, 6), (3, 7)]
+    width_group = [(0, 1), (2, 3), (4, 5), (6, 7)]
+    length_group = [(0, 3), (1, 2), (4, 7), (5, 6)]
+    vector_group = [(0, 3), (1, 2), (4, 7), (5, 6)]
+    height, width, length = 0., 0., 0.
+    vector = np.zeros(2, dtype=np.float32)
+    for index_h, index_w, index_l, index_v in zip(height_group, width_group, length_group, vector_group):
+        height += np.linalg.norm(corners[index_h[0], :] - corners[index_h[1], :])
+        width += np.linalg.norm(corners[index_w[0], :] - corners[index_w[1], :])
+        length += np.linalg.norm(corners[index_l[0], :] - corners[index_l[1], :])
+        vector[0] += (corners[index_v[0], :] - corners[index_v[1], :])[0]
+        vector[1] += (corners[index_v[0], :] - corners[index_v[1], :])[2]
+
+    height, width, length = height * 1.0 / 4, width * 1.0 / 4, length * 1.0 / 4
+    rotation_y = -np.arctan2(vector[1], vector[0])
+
+    center_point = corners.mean(axis=0)
+    center_point[1] += height / 2
+    camera_rect = np.concatenate([center_point, np.array([length, height, width, rotation_y])])
+
+    return camera_rect
+
+
+def mask_boxes_outside_range_numpy(boxes, limit_range, min_num_corners=1, use_center_to_filter=True):
+    """
+    Args:
+        boxes: (N, 7) [x, y, z, dx, dy, dz, heading, ...], (x, y, z) is the box center
+        limit_range: [minx, miny, minz, maxx, maxy, maxz]
+        min_num_corners: minimum number of BEV corners that must fall inside the range
+        use_center_to_filter: filter by box center instead of BEV corners
+
+    Returns:
+        mask: (N) bool
+    """
+    if boxes.shape[1] > 7:
+        boxes = boxes[:, 0:7]
+    if use_center_to_filter:
+        box_centers = boxes[:, 0:3]
+        mask = ((box_centers >= limit_range[0:3]) & (box_centers <= limit_range[3:6])).all(axis=-1)
+    else:
+        corners = boxes_to_corners_3d(boxes)  # (N, 8, 3)
+        corners = corners[:, :, 0:2]
+        mask = ((corners >= limit_range[0:2]) & (corners <= limit_range[3:5])).all(axis=2)
+        mask = mask.sum(axis=1) >= min_num_corners  # (N)
+
+    return mask
+
+
+def remove_points_in_boxes3d(points, boxes3d):
+    """
+    Args:
+        points: (num_points, 3 + C)
+        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center; the boxes must not overlap
+
+    Returns:
+        points that fall outside all of the given boxes
+    """
+    boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d)
+    points, is_numpy =
common_utils.check_numpy_to_torch(points) + point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(points[:, 0:3], boxes3d) + points = points[point_masks.sum(dim=0) == 0] + + return points.numpy() if is_numpy else points + + +def boxes3d_kitti_camera_to_lidar(boxes3d_camera, calib): + """ + Args: + boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords + calib: + + Returns: + boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + + """ + boxes3d_camera_copy = copy.deepcopy(boxes3d_camera) + xyz_camera, r = boxes3d_camera_copy[:, 0:3], boxes3d_camera_copy[:, 6:7] + l, h, w = boxes3d_camera_copy[:, 3:4], boxes3d_camera_copy[:, 4:5], boxes3d_camera_copy[:, 5:6] + + xyz_lidar = calib.rect_to_lidar(xyz_camera) + xyz_lidar[:, 2] += h[:, 0] / 2 + return np.concatenate([xyz_lidar, l, w, h, -(r + np.pi / 2)], axis=-1) + + +def boxes3d_kitti_fakelidar_to_lidar(boxes3d_lidar): + """ + Args: + boxes3d_fakelidar: (N, 7) [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center + + Returns: + boxes3d_lidar: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + + """ + boxes3d_lidar_copy = copy.deepcopy(boxes3d_lidar) + w, l, h = boxes3d_lidar_copy[:, 3:4], boxes3d_lidar_copy[:, 4:5], boxes3d_lidar_copy[:, 5:6] + r = boxes3d_lidar_copy[:, 6:7] + + boxes3d_lidar_copy[:, 2] += h[:, 0] / 2 + return np.concatenate([boxes3d_lidar_copy[:, 0:3], l, w, h, -(r + np.pi / 2)], axis=-1) + + +def boxes3d_kitti_lidar_to_fakelidar(boxes3d_lidar): + """ + Args: + boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + + Returns: + boxes3d_fakelidar: [x, y, z, w, l, h, r] in old LiDAR coordinates, z is bottom center + + """ + boxes3d_lidar_copy = copy.deepcopy(boxes3d_lidar) + dx, dy, dz = boxes3d_lidar_copy[:, 3:4], boxes3d_lidar_copy[:, 4:5], boxes3d_lidar_copy[:, 5:6] + heading = boxes3d_lidar_copy[:, 6:7] + + boxes3d_lidar_copy[:, 2] -= dz[:, 0] / 2 + return np.concatenate([boxes3d_lidar_copy[:, 0:3], dy, dx, dz, -heading - np.pi / 2], axis=-1) + + +def enlarge_box3d(boxes3d, extra_width=(0, 0, 0)): + """ + Args: + boxes3d: [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + extra_width: [extra_x, extra_y, extra_z] + + Returns: + + """ + boxes3d, is_numpy = common_utils.check_numpy_to_torch(boxes3d) + large_boxes3d = boxes3d.clone() + + large_boxes3d[:, 3:6] += boxes3d.new_tensor(extra_width)[None, :] + return large_boxes3d + + +def boxes3d_lidar_to_kitti_camera(boxes3d_lidar, calib): + """ + :param boxes3d_lidar: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + :param calib: + :return: + boxes3d_camera: (N, 7) [x, y, z, l, h, w, r] in rect camera coords + """ + boxes3d_lidar_copy = copy.deepcopy(boxes3d_lidar) + xyz_lidar = boxes3d_lidar_copy[:, 0:3] + l, w, h = boxes3d_lidar_copy[:, 3:4], boxes3d_lidar_copy[:, 4:5], boxes3d_lidar_copy[:, 5:6] + r = boxes3d_lidar_copy[:, 6:7] + + xyz_lidar[:, 2] -= h.reshape(-1) / 2 + xyz_cam = calib.lidar_to_rect(xyz_lidar) + # xyz_cam[:, 1] += h.reshape(-1) / 2 + r = -r - np.pi / 2 + return np.concatenate([xyz_cam, l, h, w, r], axis=-1) + + +def boxes3d_to_corners3d_kitti_camera(boxes3d, bottom_center=True): + """ + :param boxes3d: (N, 7) [x, y, z, l, h, w, ry] in camera coords, see the definition of ry in KITTI dataset + :param bottom_center: whether y is on the bottom center of object + :return: corners3d: (N, 8, 3) + 7 -------- 4 + /| /| + 6 -------- 5 . + | | | | + . 
3 -------- 0 + |/ |/ + 2 -------- 1 + """ + boxes_num = boxes3d.shape[0] + l, h, w = boxes3d[:, 3], boxes3d[:, 4], boxes3d[:, 5] + x_corners = np.array([l / 2., l / 2., -l / 2., -l / 2., l / 2., l / 2., -l / 2., -l / 2], dtype=np.float32).T + z_corners = np.array([w / 2., -w / 2., -w / 2., w / 2., w / 2., -w / 2., -w / 2., w / 2.], dtype=np.float32).T + if bottom_center: + y_corners = np.zeros((boxes_num, 8), dtype=np.float32) + y_corners[:, 4:8] = -h.reshape(boxes_num, 1).repeat(4, axis=1) # (N, 8) + else: + y_corners = np.array([h / 2., h / 2., h / 2., h / 2., -h / 2., -h / 2., -h / 2., -h / 2.], dtype=np.float32).T + + ry = boxes3d[:, 6] + zeros, ones = np.zeros(ry.size, dtype=np.float32), np.ones(ry.size, dtype=np.float32) + rot_list = np.array([[np.cos(ry), zeros, -np.sin(ry)], + [zeros, ones, zeros], + [np.sin(ry), zeros, np.cos(ry)]]) # (3, 3, N) + R_list = np.transpose(rot_list, (2, 0, 1)) # (N, 3, 3) + + temp_corners = np.concatenate((x_corners.reshape(-1, 8, 1), y_corners.reshape(-1, 8, 1), + z_corners.reshape(-1, 8, 1)), axis=2) # (N, 8, 3) + rotated_corners = np.matmul(temp_corners, R_list) # (N, 8, 3) + x_corners, y_corners, z_corners = rotated_corners[:, :, 0], rotated_corners[:, :, 1], rotated_corners[:, :, 2] + + x_loc, y_loc, z_loc = boxes3d[:, 0], boxes3d[:, 1], boxes3d[:, 2] + + x = x_loc.reshape(-1, 1) + x_corners.reshape(-1, 8) + y = y_loc.reshape(-1, 1) + y_corners.reshape(-1, 8) + z = z_loc.reshape(-1, 1) + z_corners.reshape(-1, 8) + + corners = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1), z.reshape(-1, 8, 1)), axis=2) + + return corners.astype(np.float32) + + +def boxes3d_kitti_camera_to_imageboxes(boxes3d, calib, image_shape=None): + """ + :param boxes3d: (N, 7) [x, y, z, l, h, w, r] in rect camera coords + :param calib: + :return: + box_2d_preds: (N, 4) [x1, y1, x2, y2] + """ + corners3d = boxes3d_to_corners3d_kitti_camera(boxes3d) + pts_img, _ = calib.rect_to_img(corners3d.reshape(-1, 3)) + corners_in_image = pts_img.reshape(-1, 8, 2) + + min_uv = np.min(corners_in_image, axis=1) # (N, 2) + max_uv = np.max(corners_in_image, axis=1) # (N, 2) + boxes2d_image = np.concatenate([min_uv, max_uv], axis=1) + if image_shape is not None: + boxes2d_image[:, 0] = np.clip(boxes2d_image[:, 0], a_min=0, a_max=image_shape[1] - 1) + boxes2d_image[:, 1] = np.clip(boxes2d_image[:, 1], a_min=0, a_max=image_shape[0] - 1) + boxes2d_image[:, 2] = np.clip(boxes2d_image[:, 2], a_min=0, a_max=image_shape[1] - 1) + boxes2d_image[:, 3] = np.clip(boxes2d_image[:, 3], a_min=0, a_max=image_shape[0] - 1) + + return boxes2d_image + + +def boxes_iou_normal(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 4) [x1, y1, x2, y2] + boxes_b: (M, 4) [x1, y1, x2, y2] + + Returns: + + """ + assert boxes_a.shape[1] == boxes_b.shape[1] == 4 + x_min = torch.max(boxes_a[:, 0, None], boxes_b[None, :, 0]) + x_max = torch.min(boxes_a[:, 2, None], boxes_b[None, :, 2]) + y_min = torch.max(boxes_a[:, 1, None], boxes_b[None, :, 1]) + y_max = torch.min(boxes_a[:, 3, None], boxes_b[None, :, 3]) + x_len = torch.clamp_min(x_max - x_min, min=0) + y_len = torch.clamp_min(y_max - y_min, min=0) + area_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1]) + area_b = (boxes_b[:, 2] - boxes_b[:, 0]) * (boxes_b[:, 3] - boxes_b[:, 1]) + a_intersect_b = x_len * y_len + iou = a_intersect_b / torch.clamp_min(area_a[:, None] + area_b[None, :] - a_intersect_b, min=1e-6) + return iou + + +def boxes3d_lidar_to_aligned_bev_boxes(boxes3d): + """ + Args: + boxes3d: (N, 7 + C) [x, y, z, dx, dy, dz, heading] in 
lidar coordinate + + Returns: + aligned_bev_boxes: (N, 4) [x1, y1, x2, y2] in the above lidar coordinate + """ + rot_angle = common_utils.limit_period(boxes3d[:, 6], offset=0.5, period=np.pi).abs() + choose_dims = torch.where(rot_angle[:, None] < np.pi / 4, boxes3d[:, [3, 4]], boxes3d[:, [4, 3]]) + aligned_bev_boxes = torch.cat((boxes3d[:, 0:2] - choose_dims / 2, boxes3d[:, 0:2] + choose_dims / 2), dim=1) + return aligned_bev_boxes + + +def boxes3d_nearest_bev_iou(boxes_a, boxes_b): + """ + Args: + boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading] + boxes_b: (N, 7) [x, y, z, dx, dy, dz, heading] + + Returns: + + """ + boxes_bev_a = boxes3d_lidar_to_aligned_bev_boxes(boxes_a) + boxes_bev_b = boxes3d_lidar_to_aligned_bev_boxes(boxes_b) + + return boxes_iou_normal(boxes_bev_a, boxes_bev_b) + + +def area(box) -> torch.Tensor: + """ + Computes the area of all the boxes. + + Returns: + torch.Tensor: a vector with areas of each box. + """ + area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) + return area + + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +def pairwise_iou(boxes1, boxes2) -> torch.Tensor: + """ + Given two lists of boxes of size N and M, + compute the IoU (intersection over union) + between __all__ N x M pairs of boxes. + The box order must be (xmin, ymin, xmax, ymax). + + Args: + boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. + + Returns: + Tensor: IoU, sized [N,M]. + """ + area1 = area(boxes1) + area2 = area(boxes2) + + width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( + boxes1[:, None, :2], boxes2[:, :2] + ) # [N,M,2] + + width_height.clamp_(min=0) # [N,M,2] + inter = width_height.prod(dim=2) # [N,M] + del width_height + + # handle empty boxes + iou = torch.where( + inter > 0, + inter / (area1[:, None] + area2 - inter), + torch.zeros(1, dtype=inter.dtype, device=inter.device), + ) + return iou + + +def center_to_corner2d(center, dim): + corners_norm = torch.tensor([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], device=dim.device).type_as(center) # (4, 2) + corners = dim.view([-1, 1, 2]) * corners_norm.view([1, 4, 2]) # (N, 4, 2) + corners = corners + center.view(-1, 1, 2) + return corners + + +def bbox3d_overlaps_diou(pred_boxes, gt_boxes): + """ + https://github.com/agent-sgs/PillarNet/blob/master/det3d/core/utils/center_utils.py + Args: + pred_boxes (N, 7): + gt_boxes (N, 7): + + Returns: + _type_: _description_ + """ + assert pred_boxes.shape[0] == gt_boxes.shape[0] + + qcorners = center_to_corner2d(pred_boxes[:, :2], pred_boxes[:, 3:5]) # (N, 4, 2) + gcorners = center_to_corner2d(gt_boxes[:, :2], gt_boxes[:, 3:5]) # (N, 4, 2) + + inter_max_xy = torch.minimum(qcorners[:, 2], gcorners[:, 2]) + inter_min_xy = torch.maximum(qcorners[:, 0], gcorners[:, 0]) + out_max_xy = torch.maximum(qcorners[:, 2], gcorners[:, 2]) + out_min_xy = torch.minimum(qcorners[:, 0], gcorners[:, 0]) + + # calculate area + volume_pred_boxes = pred_boxes[:, 3] * pred_boxes[:, 4] * pred_boxes[:, 5] + volume_gt_boxes = gt_boxes[:, 3] * gt_boxes[:, 4] * gt_boxes[:, 5] + + inter_h = torch.minimum(pred_boxes[:, 2] + 0.5 * pred_boxes[:, 5], gt_boxes[:, 2] + 0.5 * gt_boxes[:, 5]) - \ + torch.maximum(pred_boxes[:, 2] - 0.5 * pred_boxes[:, 5], gt_boxes[:, 2] - 0.5 * gt_boxes[:, 5]) + inter_h = torch.clamp(inter_h, min=0) + + inter = torch.clamp((inter_max_xy - inter_min_xy), min=0) + volume_inter = inter[:, 0] * inter[:, 1] * inter_h + volume_union = volume_gt_boxes + 
volume_pred_boxes - volume_inter + + # boxes_iou3d_gpu(pred_boxes, gt_boxes) + inter_diag = torch.pow(gt_boxes[:, 0:3] - pred_boxes[:, 0:3], 2).sum(-1) + + outer_h = torch.maximum(gt_boxes[:, 2] + 0.5 * gt_boxes[:, 5], pred_boxes[:, 2] + 0.5 * pred_boxes[:, 5]) - \ + torch.minimum(gt_boxes[:, 2] - 0.5 * gt_boxes[:, 5], pred_boxes[:, 2] - 0.5 * pred_boxes[:, 5]) + outer_h = torch.clamp(outer_h, min=0) + outer = torch.clamp((out_max_xy - out_min_xy), min=0) + outer_diag = outer[:, 0] ** 2 + outer[:, 1] ** 2 + outer_h ** 2 + + dious = volume_inter / volume_union - inter_diag / outer_diag + dious = torch.clamp(dious, min=-1.0, max=1.0) + + return dious \ No newline at end of file diff --git a/toolbox/openpcdet/pcdet/utils/calibration_kitti.py b/toolbox/openpcdet/pcdet/utils/calibration_kitti.py new file mode 100644 index 000000000..d7ff1f51b --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/calibration_kitti.py @@ -0,0 +1,125 @@ +import numpy as np + + +def get_calib_from_file(calib_file): + with open(calib_file) as f: + lines = f.readlines() + + obj = lines[2].strip().split(' ')[1:] + P2 = np.array(obj, dtype=np.float32) + obj = lines[3].strip().split(' ')[1:] + P3 = np.array(obj, dtype=np.float32) + obj = lines[4].strip().split(' ')[1:] + R0 = np.array(obj, dtype=np.float32) + obj = lines[5].strip().split(' ')[1:] + Tr_velo_to_cam = np.array(obj, dtype=np.float32) + + return {'P2': P2.reshape(3, 4), + 'P3': P3.reshape(3, 4), + 'R0': R0.reshape(3, 3), + 'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)} + + +class Calibration(object): + def __init__(self, calib_file): + if not isinstance(calib_file, dict): + calib = get_calib_from_file(calib_file) + else: + calib = calib_file + + self.P2 = calib['P2'] # 3 x 4 + self.R0 = calib['R0'] # 3 x 3 + self.V2C = calib['Tr_velo2cam'] # 3 x 4 + + # Camera intrinsics and extrinsics + self.cu = self.P2[0, 2] + self.cv = self.P2[1, 2] + self.fu = self.P2[0, 0] + self.fv = self.P2[1, 1] + self.tx = self.P2[0, 3] / (-self.fu) + self.ty = self.P2[1, 3] / (-self.fv) + + def cart_to_hom(self, pts): + """ + :param pts: (N, 3 or 2) + :return pts_hom: (N, 4 or 3) + """ + pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32))) + return pts_hom + + def rect_to_lidar(self, pts_rect): + """ + :param pts_lidar: (N, 3) + :return pts_rect: (N, 3) + """ + pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4) + R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4) + R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4) + R0_ext[3, 3] = 1 + V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4) + V2C_ext[3, 3] = 1 + + pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T)) + return pts_lidar[:, 0:3] + + def lidar_to_rect(self, pts_lidar): + """ + :param pts_lidar: (N, 3) + :return pts_rect: (N, 3) + """ + pts_lidar_hom = self.cart_to_hom(pts_lidar) + pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T)) + # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T)) + return pts_rect + + def rect_to_img(self, pts_rect): + """ + :param pts_rect: (N, 3) + :return pts_img: (N, 2) + """ + pts_rect_hom = self.cart_to_hom(pts_rect) + pts_2d_hom = np.dot(pts_rect_hom, self.P2.T) + pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2) + pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord + return pts_img, pts_rect_depth + + def lidar_to_img(self, pts_lidar): + """ + :param pts_lidar: (N, 3) + :return pts_img: (N, 2) + """ + pts_rect = 
self.lidar_to_rect(pts_lidar) + pts_img, pts_depth = self.rect_to_img(pts_rect) + return pts_img, pts_depth + + def img_to_rect(self, u, v, depth_rect): + """ + :param u: (N) + :param v: (N) + :param depth_rect: (N) + :return: + """ + x = ((u - self.cu) * depth_rect) / self.fu + self.tx + y = ((v - self.cv) * depth_rect) / self.fv + self.ty + pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1) + return pts_rect + + def corners3d_to_img_boxes(self, corners3d): + """ + :param corners3d: (N, 8, 3) corners in rect coordinate + :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate + :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate + """ + sample_num = corners3d.shape[0] + corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4) + + img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3) + + x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2] + x1, y1 = np.min(x, axis=1), np.min(y, axis=1) + x2, y2 = np.max(x, axis=1), np.max(y, axis=1) + + boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1) + boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2) + + return boxes, boxes_corner diff --git a/toolbox/openpcdet/pcdet/utils/common_utils.py b/toolbox/openpcdet/pcdet/utils/common_utils.py new file mode 100644 index 000000000..af70728db --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/common_utils.py @@ -0,0 +1,295 @@ +import logging +import os +import pickle +import random +import shutil +import subprocess +import SharedArray + +import numpy as np +import torch +import torch.distributed as dist +import torch.multiprocessing as mp + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + +def limit_period(val, offset=0.5, period=np.pi): + val, is_numpy = check_numpy_to_torch(val) + ans = val - torch.floor(val / period + offset) * period + return ans.numpy() if is_numpy else ans + + +def drop_info_with_name(info, name): + ret_info = {} + keep_indices = [i for i, x in enumerate(info['name']) if x != name] + for key in info.keys(): + ret_info[key] = info[key][keep_indices] + return ret_info + + +def rotate_points_along_z(points, angle): + """ + Args: + points: (B, N, 3 + C) + angle: (B), angle along z-axis, angle increases x ==> y + Returns: + + """ + points, is_numpy = check_numpy_to_torch(points) + angle, _ = check_numpy_to_torch(angle) + + cosa = torch.cos(angle) + sina = torch.sin(angle) + zeros = angle.new_zeros(points.shape[0]) + ones = angle.new_ones(points.shape[0]) + rot_matrix = torch.stack(( + cosa, sina, zeros, + -sina, cosa, zeros, + zeros, zeros, ones + ), dim=1).view(-1, 3, 3).float() + points_rot = torch.matmul(points[:, :, 0:3], rot_matrix) + points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1) + return points_rot.numpy() if is_numpy else points_rot + + +def angle2matrix(angle): + """ + Args: + angle: angle along z-axis, angle increases x ==> y + Returns: + rot_matrix: (3x3 Tensor) rotation matrix + """ + + cosa = torch.cos(angle) + sina = torch.sin(angle) + rot_matrix = torch.tensor([ + [cosa, -sina, 0], + [sina, cosa, 0], + [ 0, 0, 1] + ]) + return rot_matrix + + +def mask_points_by_range(points, limit_range): + mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \ + & (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4]) + return mask + + +def 
get_voxel_centers(voxel_coords, downsample_times, voxel_size, point_cloud_range): + """ + Args: + voxel_coords: (N, 3) + downsample_times: + voxel_size: + point_cloud_range: + + Returns: + + """ + assert voxel_coords.shape[1] == 3 + voxel_centers = voxel_coords[:, [2, 1, 0]].float() # (xyz) + voxel_size = torch.tensor(voxel_size, device=voxel_centers.device).float() * downsample_times + pc_range = torch.tensor(point_cloud_range[0:3], device=voxel_centers.device).float() + voxel_centers = (voxel_centers + 0.5) * voxel_size + pc_range + return voxel_centers + + +def create_logger(log_file=None, rank=0, log_level=logging.INFO): + logger = logging.getLogger(__name__) + logger.setLevel(log_level if rank == 0 else 'ERROR') + formatter = logging.Formatter('%(asctime)s %(levelname)5s %(message)s') + console = logging.StreamHandler() + console.setLevel(log_level if rank == 0 else 'ERROR') + console.setFormatter(formatter) + logger.addHandler(console) + if log_file is not None: + file_handler = logging.FileHandler(filename=log_file) + file_handler.setLevel(log_level if rank == 0 else 'ERROR') + file_handler.setFormatter(formatter) + logger.addHandler(file_handler) + logger.propagate = False + return logger + + +def set_random_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def worker_init_fn(worker_id, seed=666): + if seed is not None: + random.seed(seed + worker_id) + np.random.seed(seed + worker_id) + torch.manual_seed(seed + worker_id) + torch.cuda.manual_seed(seed + worker_id) + torch.cuda.manual_seed_all(seed + worker_id) + + +def get_pad_params(desired_size, cur_size): + """ + Get padding parameters for np.pad function + Args: + desired_size: int, Desired padded output size + cur_size: int, Current size. 
Should always be less than or equal to cur_size + Returns: + pad_params: tuple(int), Number of values padded to the edges (before, after) + """ + assert desired_size >= cur_size + + # Calculate amount to pad + diff = desired_size - cur_size + pad_params = (0, diff) + + return pad_params + + +def keep_arrays_by_name(gt_names, used_classes): + inds = [i for i, x in enumerate(gt_names) if x in used_classes] + inds = np.array(inds, dtype=np.int64) + return inds + + +def init_dist_slurm(tcp_port, local_rank, backend='nccl'): + """ + modified from https://github.com/open-mmlab/mmdetection + Args: + tcp_port: + backend: + + Returns: + + """ + proc_id = int(os.environ['SLURM_PROCID']) + ntasks = int(os.environ['SLURM_NTASKS']) + node_list = os.environ['SLURM_NODELIST'] + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(proc_id % num_gpus) + addr = subprocess.getoutput('scontrol show hostname {} | head -n1'.format(node_list)) + os.environ['MASTER_PORT'] = str(tcp_port) + os.environ['MASTER_ADDR'] = addr + os.environ['WORLD_SIZE'] = str(ntasks) + os.environ['RANK'] = str(proc_id) + dist.init_process_group(backend=backend) + + total_gpus = dist.get_world_size() + rank = dist.get_rank() + return total_gpus, rank + + +def init_dist_pytorch(tcp_port, local_rank, backend='nccl'): + if mp.get_start_method(allow_none=True) is None: + mp.set_start_method('spawn') + # os.environ['MASTER_PORT'] = str(tcp_port) + # os.environ['MASTER_ADDR'] = 'localhost' + num_gpus = torch.cuda.device_count() + torch.cuda.set_device(local_rank % num_gpus) + + dist.init_process_group( + backend=backend, + # init_method='tcp://127.0.0.1:%d' % tcp_port, + # rank=local_rank, + # world_size=num_gpus + ) + rank = dist.get_rank() + return num_gpus, rank + + +def get_dist_info(return_gpu_per_machine=False): + if torch.__version__ < '1.0': + initialized = dist._initialized + else: + if dist.is_available(): + initialized = dist.is_initialized() + else: + initialized = False + if initialized: + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + + if return_gpu_per_machine: + gpu_per_machine = torch.cuda.device_count() + return rank, world_size, gpu_per_machine + + return rank, world_size + + +def merge_results_dist(result_part, size, tmpdir): + rank, world_size = get_dist_info() + os.makedirs(tmpdir, exist_ok=True) + + dist.barrier() + pickle.dump(result_part, open(os.path.join(tmpdir, 'result_part_{}.pkl'.format(rank)), 'wb')) + dist.barrier() + + if rank != 0: + return None + + part_list = [] + for i in range(world_size): + part_file = os.path.join(tmpdir, 'result_part_{}.pkl'.format(i)) + part_list.append(pickle.load(open(part_file, 'rb'))) + + ordered_results = [] + for res in zip(*part_list): + ordered_results.extend(list(res)) + ordered_results = ordered_results[:size] + shutil.rmtree(tmpdir) + return ordered_results + + +def scatter_point_inds(indices, point_inds, shape): + ret = -1 * torch.ones(*shape, dtype=point_inds.dtype, device=point_inds.device) + ndim = indices.shape[-1] + flattened_indices = indices.view(-1, ndim) + slices = [flattened_indices[:, i] for i in range(ndim)] + ret[slices] = point_inds + return ret + + +def generate_voxel2pinds(sparse_tensor): + device = sparse_tensor.indices.device + batch_size = sparse_tensor.batch_size + spatial_shape = sparse_tensor.spatial_shape + indices = sparse_tensor.indices.long() + point_indices = torch.arange(indices.shape[0], device=device, dtype=torch.int32) + output_shape = [batch_size] + list(spatial_shape) + 
v2pinds_tensor = scatter_point_inds(indices, point_indices, output_shape) + return v2pinds_tensor + + +def sa_create(name, var): + x = SharedArray.create(name, var.shape, dtype=var.dtype) + x[...] = var[...] + x.flags.writeable = False + return x + + +class AverageMeter(object): + """Computes and stores the average and current value""" + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count diff --git a/toolbox/openpcdet/pcdet/utils/commu_utils.py b/toolbox/openpcdet/pcdet/utils/commu_utils.py new file mode 100644 index 000000000..d9e866fba --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/commu_utils.py @@ -0,0 +1,182 @@ +""" +This file contains primitives for multi-gpu communication. +This is useful when doing distributed training. + +deeply borrow from maskrcnn-benchmark and ST3D +""" + +import pickle +import time + +import torch +import torch.distributed as dist + + +def get_world_size(): + if not dist.is_available(): + return 1 + if not dist.is_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def synchronize(): + """ + Helper function to synchronize (barrier) among all processes when + using distributed training + """ + if not dist.is_available(): + return + if not dist.is_initialized(): + return + world_size = dist.get_world_size() + if world_size == 1: + return + dist.barrier() + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + origin_size = None + if not isinstance(data, torch.Tensor): + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + else: + origin_size = data.size() + tensor = data.reshape(-1) + + tensor_type = tensor.dtype + + # obtain Tensor size of each rank + local_size = torch.LongTensor([tensor.numel()]).to("cuda") + size_list = [torch.LongTensor([0]).to("cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.FloatTensor(size=(max_size,)).cuda().to(tensor_type)) + if local_size != max_size: + padding = torch.FloatTensor(size=(max_size - local_size,)).cuda().to(tensor_type) + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + if origin_size is None: + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + else: + buffer = tensor[:size] + data_list.append(buffer) + + if origin_size is not None: + new_shape = [-1] + list(origin_size[1:]) + resized_list = [] + for data in data_list: + # suppose the difference of tensor size exist in first dimension + data = data.reshape(new_shape) + resized_list.append(data) + + return 
resized_list + else: + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that process with rank + 0 has the averaged results. Returns a dict with the same fields as + input_dict, after reduction. + """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.reduce(values, dst=0) + if dist.get_rank() == 0 and average: + # only main process gets accumulated, so only divide by + # world_size in this case + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +def average_reduce_value(data): + data_list = all_gather(data) + return sum(data_list) / len(data_list) + + +def all_reduce(data, op="sum", average=False): + + def op_map(op): + op_dict = { + "SUM": dist.ReduceOp.SUM, + "MAX": dist.ReduceOp.MAX, + "MIN": dist.ReduceOp.MIN, + "PRODUCT": dist.ReduceOp.PRODUCT, + } + return op_dict[op] + + world_size = get_world_size() + if world_size > 1: + reduced_data = data.clone() + dist.all_reduce(reduced_data, op=op_map(op.upper())) + if average: + assert op.upper() == 'SUM' + return reduced_data / world_size + else: + return reduced_data + return data + + +@torch.no_grad() +def concat_all_gather(tensor): + """ + Performs all_gather operation on the provided tensors. + *** Warning ***: torch.distributed.all_gather has no gradient. + """ + tensors_gather = [torch.ones_like(tensor) + for _ in range(torch.distributed.get_world_size())] + torch.distributed.all_gather(tensors_gather, tensor, async_op=False) + + output = torch.cat(tensors_gather, dim=0) + return output diff --git a/toolbox/openpcdet/pcdet/utils/loss_utils.py b/toolbox/openpcdet/pcdet/utils/loss_utils.py new file mode 100644 index 000000000..bd114bae4 --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/loss_utils.py @@ -0,0 +1,649 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from . import box_utils +from pcdet.ops.iou3d_nms import iou3d_nms_utils + + +class SigmoidFocalClassificationLoss(nn.Module): + """ + Sigmoid focal cross entropy loss. + """ + + def __init__(self, gamma: float = 2.0, alpha: float = 0.25): + """ + Args: + gamma: Weighting parameter to balance loss for hard and easy examples. + alpha: Weighting parameter to balance loss for positive and negative examples. + """ + super(SigmoidFocalClassificationLoss, self).__init__() + self.alpha = alpha + self.gamma = gamma + + @staticmethod + def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor): + """ PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits: + max(x, 0) - x * z + log(1 + exp(-abs(x))) in + https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits + + Args: + input: (B, #anchors, #classes) float tensor. + Predicted logits for each class + target: (B, #anchors, #classes) float tensor. + One-hot encoded classification targets + + Returns: + loss: (B, #anchors, #classes) float tensor. 
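+ (Editorial note, not part of the original patch: the expression below is the
+ standard numerically stable logits form of binary cross entropy,
+ max(x, 0) - x*z + log(1 + exp(-|x|)) == -[z*log(sigmoid(x)) + (1-z)*log(1-sigmoid(x))],
+ which avoids overflowing exp(x) for large |x|. Quick check: x=0, z=1 gives log(2) ~= 0.693.)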
+ Sigmoid cross entropy loss without reduction + """ + loss = torch.clamp(input, min=0) - input * target + \ + torch.log1p(torch.exp(-torch.abs(input))) + return loss + + def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor): + """ + Args: + input: (B, #anchors, #classes) float tensor. + Predicted logits for each class + target: (B, #anchors, #classes) float tensor. + One-hot encoded classification targets + weights: (B, #anchors) float tensor. + Anchor-wise weights. + + Returns: + weighted_loss: (B, #anchors, #classes) float tensor after weighting. + """ + pred_sigmoid = torch.sigmoid(input) + alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha) + pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid + focal_weight = alpha_weight * torch.pow(pt, self.gamma) + + bce_loss = self.sigmoid_cross_entropy_with_logits(input, target) + + loss = focal_weight * bce_loss + + if weights.shape.__len__() == 2 or \ + (weights.shape.__len__() == 1 and target.shape.__len__() == 2): + weights = weights.unsqueeze(-1) + + assert weights.shape.__len__() == loss.shape.__len__() + + return loss * weights + + +class WeightedSmoothL1Loss(nn.Module): + """ + Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss + https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py + | 0.5 * x ** 2 / beta if abs(x) < beta + smoothl1(x) = | + | abs(x) - 0.5 * beta otherwise, + where x = input - target. + """ + def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None): + """ + Args: + beta: Scalar float. + L1 to L2 change point. + For beta values < 1e-5, L1 loss is computed. + code_weights: (#codes) float list if not None. + Code-wise weights. + """ + super(WeightedSmoothL1Loss, self).__init__() + self.beta = beta + if code_weights is not None: + self.code_weights = np.array(code_weights, dtype=np.float32) + self.code_weights = torch.from_numpy(self.code_weights).cuda() + + @staticmethod + def smooth_l1_loss(diff, beta): + if beta < 1e-5: + loss = torch.abs(diff) + else: + n = torch.abs(diff) + loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta) + + return loss + + def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None): + """ + Args: + input: (B, #anchors, #codes) float tensor. + Ecoded predicted locations of objects. + target: (B, #anchors, #codes) float tensor. + Regression targets. + weights: (B, #anchors) float tensor if not None. + + Returns: + loss: (B, #anchors) float tensor. + Weighted smooth l1 loss without reduction. + """ + target = torch.where(torch.isnan(target), input, target) # ignore nan targets + + diff = input - target + # code-wise weighting + if self.code_weights is not None: + diff = diff * self.code_weights.view(1, 1, -1) + + loss = self.smooth_l1_loss(diff, self.beta) + + # anchor-wise weighting + if weights is not None: + assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1] + loss = loss * weights.unsqueeze(-1) + + return loss + + +class WeightedL1Loss(nn.Module): + def __init__(self, code_weights: list = None): + """ + Args: + code_weights: (#codes) float list if not None. + Code-wise weights. 
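+ (Editorial note, not part of the original patch: code_weights re-weights each
+ regression code, e.g. [x, y, z, dx, dy, dz, heading]; passing
+ code_weights=[1.0]*6 + [2.0] would double the heading penalty. The weights are
+ moved to CUDA once at construction, so the loss assumes GPU tensors at call time.)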
+ """ + super(WeightedL1Loss, self).__init__() + if code_weights is not None: + self.code_weights = np.array(code_weights, dtype=np.float32) + self.code_weights = torch.from_numpy(self.code_weights).cuda() + + @torch.cuda.amp.custom_fwd(cast_inputs=torch.float16) + def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None): + """ + Args: + input: (B, #anchors, #codes) float tensor. + Ecoded predicted locations of objects. + target: (B, #anchors, #codes) float tensor. + Regression targets. + weights: (B, #anchors) float tensor if not None. + + Returns: + loss: (B, #anchors) float tensor. + Weighted smooth l1 loss without reduction. + """ + target = torch.where(torch.isnan(target), input, target) # ignore nan targets + + diff = input - target + # code-wise weighting + if self.code_weights is not None: + diff = diff * self.code_weights.view(1, 1, -1) + + loss = torch.abs(diff) + + # anchor-wise weighting + if weights is not None: + assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1] + loss = loss * weights.unsqueeze(-1) + + return loss + + +class WeightedCrossEntropyLoss(nn.Module): + """ + Transform input to fit the fomation of PyTorch offical cross entropy loss + with anchor-wise weighting. + """ + def __init__(self): + super(WeightedCrossEntropyLoss, self).__init__() + + def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor): + """ + Args: + input: (B, #anchors, #classes) float tensor. + Predited logits for each class. + target: (B, #anchors, #classes) float tensor. + One-hot classification targets. + weights: (B, #anchors) float tensor. + Anchor-wise weights. + + Returns: + loss: (B, #anchors) float tensor. + Weighted cross entropy loss without reduction + """ + input = input.permute(0, 2, 1) + target = target.argmax(dim=-1) + loss = F.cross_entropy(input, target, reduction='none') * weights + return loss + + +def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor): + """ + Args: + pred_bbox3d: (N, 7) float Tensor. + gt_bbox3d: (N, 7) float Tensor. + + Returns: + corner_loss: (N) float Tensor. 
+ """ + assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0] + + pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d) + gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d) + + gt_bbox3d_flip = gt_bbox3d.clone() + gt_bbox3d_flip[:, 6] += np.pi + gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip) + # (N, 8) + corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2), + torch.norm(pred_box_corners - gt_box_corners_flip, dim=2)) + # (N, 8) + corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0) + + return corner_loss.mean(dim=1) + + +def compute_fg_mask(gt_boxes2d, shape, downsample_factor=1, device=torch.device("cpu")): + """ + Compute foreground mask for images + Args: + gt_boxes2d: (B, N, 4), 2D box labels + shape: torch.Size or tuple, Foreground mask desired shape + downsample_factor: int, Downsample factor for image + device: torch.device, Foreground mask desired device + Returns: + fg_mask (shape), Foreground mask + """ + fg_mask = torch.zeros(shape, dtype=torch.bool, device=device) + + # Set box corners + gt_boxes2d /= downsample_factor + gt_boxes2d[:, :, :2] = torch.floor(gt_boxes2d[:, :, :2]) + gt_boxes2d[:, :, 2:] = torch.ceil(gt_boxes2d[:, :, 2:]) + gt_boxes2d = gt_boxes2d.long() + + # Set all values within each box to True + B, N = gt_boxes2d.shape[:2] + for b in range(B): + for n in range(N): + u1, v1, u2, v2 = gt_boxes2d[b, n] + fg_mask[b, v1:v2, u1:u2] = True + + return fg_mask + + +def neg_loss_cornernet(pred, gt, mask=None): + """ + Refer to https://github.com/tianweiy/CenterPoint. + Modified focal loss. Exactly the same as CornerNet. Runs faster and costs a little bit more memory + Args: + pred: (batch x c x h x w) + gt: (batch x c x h x w) + mask: (batch x h x w) + Returns: + """ + pos_inds = gt.eq(1).float() + neg_inds = gt.lt(1).float() + + neg_weights = torch.pow(1 - gt, 4) + + loss = 0 + + pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds + neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds + + if mask is not None: + mask = mask[:, None, :, :].float() + pos_loss = pos_loss * mask + neg_loss = neg_loss * mask + num_pos = (pos_inds.float() * mask).sum() + else: + num_pos = pos_inds.float().sum() + + pos_loss = pos_loss.sum() + neg_loss = neg_loss.sum() + + if num_pos == 0: + loss = loss - neg_loss + else: + loss = loss - (pos_loss + neg_loss) / num_pos + return loss + + +def neg_loss_sparse(pred, gt): + """ + Refer to https://github.com/tianweiy/CenterPoint. + Modified focal loss. Exactly the same as CornerNet. 
Runs faster and costs a little bit more memory + Args: + pred: (batch x c x n) + gt: (batch x c x n) + Returns: + """ + pos_inds = gt.eq(1).float() + neg_inds = gt.lt(1).float() + + neg_weights = torch.pow(1 - gt, 4) + + loss = 0 + + pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds + neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds + + num_pos = pos_inds.float().sum() + + pos_loss = pos_loss.sum() + neg_loss = neg_loss.sum() + + if num_pos == 0: + loss = loss - neg_loss + else: + loss = loss - (pos_loss + neg_loss) / num_pos + return loss + + +class FocalLossCenterNet(nn.Module): + """ + Refer to https://github.com/tianweiy/CenterPoint + """ + def __init__(self): + super(FocalLossCenterNet, self).__init__() + self.neg_loss = neg_loss_cornernet + + def forward(self, out, target, mask=None): + return self.neg_loss(out, target, mask=mask) + + +def _reg_loss(regr, gt_regr, mask): + """ + Refer to https://github.com/tianweiy/CenterPoint + L1 regression loss + Args: + regr (batch x max_objects x dim) + gt_regr (batch x max_objects x dim) + mask (batch x max_objects) + Returns: + """ + num = mask.float().sum() + mask = mask.unsqueeze(2).expand_as(gt_regr).float() + isnotnan = (~ torch.isnan(gt_regr)).float() + mask *= isnotnan + regr = regr * mask + gt_regr = gt_regr * mask + + loss = torch.abs(regr - gt_regr) + loss = loss.transpose(2, 0) + + loss = torch.sum(loss, dim=2) + loss = torch.sum(loss, dim=1) + # else: + # # D x M x B + # loss = loss.reshape(loss.shape[0], -1) + + # loss = loss / (num + 1e-4) + loss = loss / torch.clamp_min(num, min=1.0) + # import pdb; pdb.set_trace() + return loss + + +def _gather_feat(feat, ind, mask=None): + dim = feat.size(2) + ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) + feat = feat.gather(1, ind) + if mask is not None: + mask = mask.unsqueeze(2).expand_as(feat) + feat = feat[mask] + feat = feat.view(-1, dim) + return feat + + +def _transpose_and_gather_feat(feat, ind): + feat = feat.permute(0, 2, 3, 1).contiguous() + feat = feat.view(feat.size(0), -1, feat.size(3)) + feat = _gather_feat(feat, ind) + return feat + + +class RegLossCenterNet(nn.Module): + """ + Refer to https://github.com/tianweiy/CenterPoint + """ + + def __init__(self): + super(RegLossCenterNet, self).__init__() + + def forward(self, output, mask, ind=None, target=None): + """ + Args: + output: (batch x dim x h x w) or (batch x max_objects) + mask: (batch x max_objects) + ind: (batch x max_objects) + target: (batch x max_objects x dim) + Returns: + """ + if ind is None: + pred = output + else: + pred = _transpose_and_gather_feat(output, ind) + loss = _reg_loss(pred, target, mask) + return loss + + +class FocalLossSparse(nn.Module): + """ + Refer to https://github.com/tianweiy/CenterPoint + """ + def __init__(self): + super(FocalLossSparse, self).__init__() + self.neg_loss = neg_loss_sparse + + def forward(self, out, target): + return self.neg_loss(out, target) + + +class RegLossSparse(nn.Module): + """ + Refer to https://github.com/tianweiy/CenterPoint + """ + + def __init__(self): + super(RegLossSparse, self).__init__() + + def forward(self, output, mask, ind=None, target=None, batch_index=None): + """ + Args: + output: (N x dim) + mask: (batch x max_objects) + ind: (batch x max_objects) + target: (batch x max_objects x dim) + Returns: + """ + + pred = [] + batch_size = mask.shape[0] + for bs_idx in range(batch_size): + batch_inds = batch_index==bs_idx + pred.append(output[batch_inds][ind[bs_idx]]) + pred = torch.stack(pred) + + loss = 
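+ # Editor's note (not part of the original patch): `ind` stores, per sample, the
+ # flattened positions of the assigned object centres and `mask` flags which of the
+ # max_objects slots are real. _reg_loss (called here) applies a masked, NaN-safe
+ # L1 over the gathered regression targets and divides by clamp_min(num_objects, 1),
+ # so scenes without labelled objects do not divide by zero.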
_reg_loss(pred, target, mask) + return loss + + +class IouLossSparse(nn.Module): + '''IouLoss loss for an output tensor + Arguments: + output (batch x dim x h x w) + mask (batch x max_objects) + ind (batch x max_objects) + target (batch x max_objects x dim) + ''' + + def __init__(self): + super(IouLossSparse, self).__init__() + + def forward(self, iou_pred, mask, ind, box_pred, box_gt, batch_index): + if mask.sum() == 0: + return iou_pred.new_zeros((1)) + batch_size = mask.shape[0] + mask = mask.bool() + + loss = 0 + for bs_idx in range(batch_size): + batch_inds = batch_index==bs_idx + pred = iou_pred[batch_inds][ind[bs_idx]][mask[bs_idx]] + pred_box = box_pred[batch_inds][ind[bs_idx]][mask[bs_idx]] + target = iou3d_nms_utils.boxes_aligned_iou3d_gpu(pred_box, box_gt[bs_idx]) + target = 2 * target - 1 + loss += F.l1_loss(pred, target, reduction='sum') + + loss = loss / (mask.sum() + 1e-4) + return loss + +class IouRegLossSparse(nn.Module): + '''Distance IoU loss for output boxes + Arguments: + output (batch x dim x h x w) + mask (batch x max_objects) + ind (batch x max_objects) + target (batch x max_objects x dim) + ''' + + def __init__(self, type="DIoU"): + super(IouRegLossSparse, self).__init__() + + def center_to_corner2d(self, center, dim): + corners_norm = torch.tensor([[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5]], + dtype=torch.float32, device=dim.device) + corners = dim.view([-1, 1, 2]) * corners_norm.view([1, 4, 2]) + corners = corners + center.view(-1, 1, 2) + return corners + + def bbox3d_iou_func(self, pred_boxes, gt_boxes): + assert pred_boxes.shape[0] == gt_boxes.shape[0] + + qcorners = self.center_to_corner2d(pred_boxes[:, :2], pred_boxes[:, 3:5]) + gcorners = self.center_to_corner2d(gt_boxes[:, :2], gt_boxes[:, 3:5]) + + inter_max_xy = torch.minimum(qcorners[:, 2], gcorners[:, 2]) + inter_min_xy = torch.maximum(qcorners[:, 0], gcorners[:, 0]) + out_max_xy = torch.maximum(qcorners[:, 2], gcorners[:, 2]) + out_min_xy = torch.minimum(qcorners[:, 0], gcorners[:, 0]) + + # calculate area + volume_pred_boxes = pred_boxes[:, 3] * pred_boxes[:, 4] * pred_boxes[:, 5] + volume_gt_boxes = gt_boxes[:, 3] * gt_boxes[:, 4] * gt_boxes[:, 5] + + inter_h = torch.minimum(pred_boxes[:, 2] + 0.5 * pred_boxes[:, 5], gt_boxes[:, 2] + 0.5 * gt_boxes[:, 5]) - \ + torch.maximum(pred_boxes[:, 2] - 0.5 * pred_boxes[:, 5], gt_boxes[:, 2] - 0.5 * gt_boxes[:, 5]) + inter_h = torch.clamp(inter_h, min=0) + + inter = torch.clamp((inter_max_xy - inter_min_xy), min=0) + volume_inter = inter[:, 0] * inter[:, 1] * inter_h + volume_union = volume_gt_boxes + volume_pred_boxes - volume_inter + + # boxes_iou3d_gpu(pred_boxes, gt_boxes) + inter_diag = torch.pow(gt_boxes[:, 0:3] - pred_boxes[:, 0:3], 2).sum(-1) + + outer_h = torch.maximum(gt_boxes[:, 2] + 0.5 * gt_boxes[:, 5], pred_boxes[:, 2] + 0.5 * pred_boxes[:, 5]) - \ + torch.minimum(gt_boxes[:, 2] - 0.5 * gt_boxes[:, 5], pred_boxes[:, 2] - 0.5 * pred_boxes[:, 5]) + outer_h = torch.clamp(outer_h, min=0) + outer = torch.clamp((out_max_xy - out_min_xy), min=0) + outer_diag = outer[:, 0] ** 2 + outer[:, 1] ** 2 + outer_h ** 2 + + dious = volume_inter / volume_union - inter_diag / outer_diag + dious = torch.clamp(dious, min=-1.0, max=1.0) + + return dious + + def forward(self, box_pred, mask, ind, box_gt, batch_index): + if mask.sum() == 0: + return box_pred.new_zeros((1)) + mask = mask.bool() + batch_size = mask.shape[0] + + loss = 0 + for bs_idx in range(batch_size): + batch_inds = batch_index==bs_idx + pred_box = box_pred[batch_inds][ind[bs_idx]] + iou = 
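+ # Editor's note (not part of the original patch): the bbox3d_iou_func call here
+ # computes an axis-aligned 3D DIoU,
+ #   DIoU = IoU - ||c_pred - c_gt||^2 / d_enclosing^2, clamped to [-1, 1],
+ # i.e. volumetric IoU penalized by the squared centre distance over the squared
+ # diagonal of the smallest enclosing box, so 1 - DIoU still yields a gradient for
+ # non-overlapping boxes. Corners are built from centres and sizes only, ignoring yaw.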
self.bbox3d_iou_func(pred_box[mask[bs_idx]], box_gt[bs_idx]) + loss += (1. - iou).sum() + + loss = loss / (mask.sum() + 1e-4) + return loss + +class L1Loss(nn.Module): + def __init__(self): + super(L1Loss, self).__init__() + + def forward(self, pred, target): + if target.numel() == 0: + return pred.sum() * 0 + assert pred.size() == target.size() + loss = torch.abs(pred - target) + return loss + + +class GaussianFocalLoss(nn.Module): + """GaussianFocalLoss is a variant of focal loss. + + More details can be found in the `paper + `_ + Code is modified from `kp_utils.py + `_ # noqa: E501 + Please notice that the target in GaussianFocalLoss is a gaussian heatmap, + not 0/1 binary target. + + Args: + alpha (float): Power of prediction. + gamma (float): Power of target for negative samples. + reduction (str): Options are "none", "mean" and "sum". + loss_weight (float): Loss weight of current loss. + """ + + def __init__(self, + alpha=2.0, + gamma=4.0): + super(GaussianFocalLoss, self).__init__() + self.alpha = alpha + self.gamma = gamma + + def forward(self, pred, target): + eps = 1e-12 + pos_weights = target.eq(1) + neg_weights = (1 - target).pow(self.gamma) + pos_loss = -(pred + eps).log() * (1 - pred).pow(self.alpha) * pos_weights + neg_loss = -(1 - pred + eps).log() * pred.pow(self.alpha) * neg_weights + + return pos_loss + neg_loss + + +def calculate_iou_loss_centerhead(iou_preds, batch_box_preds, mask, ind, gt_boxes): + """ + Args: + iou_preds: (batch x 1 x h x w) + batch_box_preds: (batch x (7 or 9) x h x w) + mask: (batch x max_objects) + ind: (batch x max_objects) + gt_boxes: (batch x N, 7 or 9) + Returns: + """ + if mask.sum() == 0: + return iou_preds.new_zeros((1)) + + mask = mask.bool() + selected_iou_preds = _transpose_and_gather_feat(iou_preds, ind)[mask] + + selected_box_preds = _transpose_and_gather_feat(batch_box_preds, ind)[mask] + iou_target = iou3d_nms_utils.paired_boxes_iou3d_gpu(selected_box_preds[:, 0:7], gt_boxes[mask][:, 0:7]) + # iou_target = iou3d_nms_utils.boxes_iou3d_gpu(selected_box_preds[:, 0:7].clone(), gt_boxes[mask][:, 0:7].clone()).diag() + iou_target = iou_target * 2 - 1 # [0, 1] ==> [-1, 1] + + # print(selected_iou_preds.view(-1), iou_target) + loss = F.l1_loss(selected_iou_preds.view(-1), iou_target, reduction='sum') + loss = loss / torch.clamp(mask.sum(), min=1e-4) + return loss + + +def calculate_iou_reg_loss_centerhead(batch_box_preds, mask, ind, gt_boxes): + if mask.sum() == 0: + return batch_box_preds.new_zeros((1)) + + mask = mask.bool() + + selected_box_preds = _transpose_and_gather_feat(batch_box_preds, ind) + + iou = box_utils.bbox3d_overlaps_diou(selected_box_preds[mask][:, 0:7], gt_boxes[mask][:, 0:7]) + + loss = (1.0 - iou).sum() / torch.clamp(mask.sum(), min=1e-4) + return loss diff --git a/toolbox/openpcdet/pcdet/utils/object3d_custom.py b/toolbox/openpcdet/pcdet/utils/object3d_custom.py new file mode 100644 index 000000000..188d1bb0b --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/object3d_custom.py @@ -0,0 +1,83 @@ +import numpy as np + + +def get_objects_from_label(label_file): + with open(label_file, 'r') as f: + lines = f.readlines() + objects = [Object3d(line) for line in lines] + return objects + + +def cls_type_to_id(cls_type): + type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4} + if cls_type not in type_to_id.keys(): + return -1 + return type_to_id[cls_type] + + +class Object3d(object): + def __init__(self, line): + label = line.strip().split(' ') + self.src = line + self.cls_type = label[0] + self.cls_id = 
cls_type_to_id(self.cls_type) + self.truncation = float(label[1]) + self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown + self.alpha = float(label[3]) + self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32) + self.h = float(label[8]) + self.w = float(label[9]) + self.l = float(label[10]) + self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32) + self.dis_to_cam = np.linalg.norm(self.loc) + self.ry = float(label[14]) + self.score = float(label[15]) if label.__len__() == 16 else -1.0 + self.level_str = None + self.level = self.get_custom_obj_level() + + def get_custom_obj_level(self): + height = float(self.box2d[3]) - float(self.box2d[1]) + 1 + + if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0: + self.level_str = 'Easy' + return 0 # Easy + elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1: + self.level_str = 'Moderate' + return 1 # Moderate + elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2: + self.level_str = 'Hard' + return 2 # Hard + else: + self.level_str = 'UnKnown' + return -1 + + def generate_corners3d(self): + """ + generate corners3d representation for this object + :return corners_3d: (8, 3) corners of box3d in camera coord + """ + l, h, w = self.l, self.h, self.w + x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] + y_corners = [0, 0, 0, 0, -h, -h, -h, -h] + z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] + + R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)], + [0, 1, 0], + [-np.sin(self.ry), 0, np.cos(self.ry)]]) + corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8) + corners3d = np.dot(R, corners3d).T + corners3d = corners3d + self.loc + return corners3d + + def to_str(self): + print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \ + % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, + self.loc, self.ry) + return print_str + + def to_custom_format(self): + custom_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \ + % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1], + self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], + self.ry) + return custom_str diff --git a/toolbox/openpcdet/pcdet/utils/object3d_kitti.py b/toolbox/openpcdet/pcdet/utils/object3d_kitti.py new file mode 100644 index 000000000..10528f604 --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/object3d_kitti.py @@ -0,0 +1,83 @@ +import numpy as np + + +def get_objects_from_label(label_file): + with open(label_file, 'r') as f: + lines = f.readlines() + objects = [Object3d(line) for line in lines] + return objects + + +def cls_type_to_id(cls_type): + type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4} + if cls_type not in type_to_id.keys(): + return -1 + return type_to_id[cls_type] + + +class Object3d(object): + def __init__(self, line): + label = line.strip().split(' ') + self.src = line + self.cls_type = label[0] + self.cls_id = cls_type_to_id(self.cls_type) + self.truncation = float(label[1]) + self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown + self.alpha = float(label[3]) + self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32) + self.h = 
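+ # Editor's note (not part of the original patch): the assignments here parse the
+ # standard KITTI label layout, one object per line:
+ #   type trunc occluded alpha x1 y1 x2 y2 h w l x y z ry [score]
+ # An illustrative line:
+ #   Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59
+ # h/w/l are the 3D box height/width/length in metres, (x, y, z) is the box bottom
+ # centre in the camera frame, and `score` only appears in detection-result files.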
float(label[8]) + self.w = float(label[9]) + self.l = float(label[10]) + self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32) + self.dis_to_cam = np.linalg.norm(self.loc) + self.ry = float(label[14]) + self.score = float(label[15]) if label.__len__() == 16 else -1.0 + self.level_str = None + self.level = self.get_kitti_obj_level() + + def get_kitti_obj_level(self): + height = float(self.box2d[3]) - float(self.box2d[1]) + 1 + + if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0: + self.level_str = 'Easy' + return 0 # Easy + elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1: + self.level_str = 'Moderate' + return 1 # Moderate + elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2: + self.level_str = 'Hard' + return 2 # Hard + else: + self.level_str = 'UnKnown' + return -1 + + def generate_corners3d(self): + """ + generate corners3d representation for this object + :return corners_3d: (8, 3) corners of box3d in camera coord + """ + l, h, w = self.l, self.h, self.w + x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] + y_corners = [0, 0, 0, 0, -h, -h, -h, -h] + z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] + + R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)], + [0, 1, 0], + [-np.sin(self.ry), 0, np.cos(self.ry)]]) + corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8) + corners3d = np.dot(R, corners3d).T + corners3d = corners3d + self.loc + return corners3d + + def to_str(self): + print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \ + % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l, + self.loc, self.ry) + return print_str + + def to_kitti_format(self): + kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \ + % (self.cls_type, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1], + self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], + self.ry) + return kitti_str diff --git a/toolbox/openpcdet/pcdet/utils/spconv_utils.py b/toolbox/openpcdet/pcdet/utils/spconv_utils.py new file mode 100644 index 000000000..1182a106f --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/spconv_utils.py @@ -0,0 +1,38 @@ +from typing import Set + +import spconv +# if float(spconv.__version__[2:]) >= 2.2: +# spconv.constants.SPCONV_USE_DIRECT_TABLE = False + +try: + import spconv.pytorch as spconv +except: + import spconv as spconv + +import torch.nn as nn + + +def find_all_spconv_keys(model: nn.Module, prefix="") -> Set[str]: + """ + Finds all spconv keys that need to have weight's transposed + """ + found_keys: Set[str] = set() + for name, child in model.named_children(): + new_prefix = f"{prefix}.{name}" if prefix != "" else name + + if isinstance(child, spconv.conv.SparseConvolution): + new_prefix = f"{new_prefix}.weight" + found_keys.add(new_prefix) + + found_keys.update(find_all_spconv_keys(child, prefix=new_prefix)) + + return found_keys + + +def replace_feature(out, new_features): + if "replace_feature" in out.__dir__(): + # spconv 2.x behaviour + return out.replace_feature(new_features) + else: + out.features = new_features + return out diff --git a/toolbox/openpcdet/pcdet/utils/transform_utils.py b/toolbox/openpcdet/pcdet/utils/transform_utils.py new file mode 100644 index 000000000..b7fdcc213 --- /dev/null +++ b/toolbox/openpcdet/pcdet/utils/transform_utils.py @@ -0,0 +1,91 @@ 
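+# Editor's note (not part of the original patch): this module collects the
+# camera-frustum helpers used by CaDDN-style image backbones (projection,
+# grid normalization, depth binning). For the LID mode of bin_depths below,
+# bin widths grow linearly: bin_size = 2*(d_max - d_min) / (N*(N+1)), and
+# inverting the cumulative width gives idx = -0.5 + 0.5*sqrt(1 + 8*(d - d_min)/bin_size).
+# Sanity check: d = d_min maps to index 0 and d = d_max maps to index N, which the
+# `target` path masks into the overflow bin.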
+import math +import torch + +try: + from kornia.geometry.conversions import ( + convert_points_to_homogeneous, + convert_points_from_homogeneous, + ) +except: + pass + # print('Warning: kornia is not installed. This package is only required by CaDDN') + + +def project_to_image(project, points): + """ + Project points to image + Args: + project [torch.tensor(..., 3, 4)]: Projection matrix + points [torch.Tensor(..., 3)]: 3D points + Returns: + points_img [torch.Tensor(..., 2)]: Points in image + points_depth [torch.Tensor(...)]: Depth of each point + """ + # Reshape tensors to expected shape + points = convert_points_to_homogeneous(points) + points = points.unsqueeze(dim=-1) + project = project.unsqueeze(dim=1) + + # Transform points to image and get depths + points_t = project @ points + points_t = points_t.squeeze(dim=-1) + points_img = convert_points_from_homogeneous(points_t) + points_depth = points_t[..., -1] - project[..., 2, 3] + + return points_img, points_depth + + +def normalize_coords(coords, shape): + """ + Normalize coordinates of a grid between [-1, 1] + Args: + coords: (..., 3), Coordinates in grid + shape: (3), Grid shape + Returns: + norm_coords: (.., 3), Normalized coordinates in grid + """ + min_n = -1 + max_n = 1 + shape = torch.flip(shape, dims=[0]) # Reverse ordering of shape + + # Subtract 1 since pixel indexing from [0, shape - 1] + norm_coords = coords / (shape - 1) * (max_n - min_n) + min_n + return norm_coords + + +def bin_depths(depth_map, mode, depth_min, depth_max, num_bins, target=False): + """ + Converts depth map into bin indices + Args: + depth_map: (H, W), Depth Map + mode: string, Discretiziation mode (See https://arxiv.org/pdf/2005.13423.pdf for more details) + UD: Uniform discretiziation + LID: Linear increasing discretiziation + SID: Spacing increasing discretiziation + depth_min: float, Minimum depth value + depth_max: float, Maximum depth value + num_bins: int, Number of depth bins + target: bool, Whether the depth bins indices will be used for a target tensor in loss comparison + Returns: + indices: (H, W), Depth bin indices + """ + if mode == "UD": + bin_size = (depth_max - depth_min) / num_bins + indices = ((depth_map - depth_min) / bin_size) + elif mode == "LID": + bin_size = 2 * (depth_max - depth_min) / (num_bins * (1 + num_bins)) + indices = -0.5 + 0.5 * torch.sqrt(1 + 8 * (depth_map - depth_min) / bin_size) + elif mode == "SID": + indices = num_bins * (torch.log(1 + depth_map) - math.log(1 + depth_min)) / \ + (math.log(1 + depth_max) - math.log(1 + depth_min)) + else: + raise NotImplementedError + + if target: + # Remove indicies outside of bounds + mask = (indices < 0) | (indices > num_bins) | (~torch.isfinite(indices)) + indices[mask] = num_bins + + # Convert to integer + indices = indices.type(torch.int64) + return indices diff --git a/toolbox/openpcdet/requirements.txt b/toolbox/openpcdet/requirements.txt new file mode 100644 index 000000000..86b7dda67 --- /dev/null +++ b/toolbox/openpcdet/requirements.txt @@ -0,0 +1,4 @@ +scikit-image +pyquaternion +easydict +tensorboardX diff --git a/toolbox/openpcdet/setup.py b/toolbox/openpcdet/setup.py new file mode 100644 index 000000000..183fee9c3 --- /dev/null +++ b/toolbox/openpcdet/setup.py @@ -0,0 +1,137 @@ +import os +import subprocess + +from setuptools import find_packages, setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + + +def get_git_commit_number(): + if not os.path.exists('.git'): + return '0000000' + + cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], 
stdout=subprocess.PIPE) + git_commit_number = cmd_out.stdout.decode('utf-8')[:7] + return git_commit_number + + +def make_cuda_ext(name, module, sources): + cuda_ext = CUDAExtension( + name='%s.%s' % (module, name), + sources=[os.path.join(*module.split('.'), src) for src in sources] + ) + return cuda_ext + + +def write_version_to_file(version, target_file): + with open(target_file, 'w') as f: + print('__version__ = "%s"' % version, file=f) + + +if __name__ == '__main__': + version = '0.6.0' + write_version_to_file(version, 'pcdet/version.py') + + setup( + name='pcdet', + version=version, + description='OpenPCDet is a general codebase for 3D object detection from point cloud', + install_requires=[ + 'numpy', + 'llvmlite', + 'numba', + 'tensorboardX', + 'easydict', + 'pyyaml', + 'scikit-image', + 'tqdm', + 'SharedArray', + # 'spconv', # spconv has different names depending on the cuda version + ], + + author='Shaoshuai Shi', + author_email='shaoshuaics@gmail.com', + license='Apache License 2.0', + packages=find_packages(exclude=['tools', 'data', 'output']), + cmdclass={ + 'build_ext': BuildExtension, + }, + ext_modules=[ + make_cuda_ext( + name='iou3d_nms_cuda', + module='pcdet.ops.iou3d_nms', + sources=[ + 'src/iou3d_cpu.cpp', + 'src/iou3d_nms_api.cpp', + 'src/iou3d_nms.cpp', + 'src/iou3d_nms_kernel.cu', + ] + ), + make_cuda_ext( + name='roiaware_pool3d_cuda', + module='pcdet.ops.roiaware_pool3d', + sources=[ + 'src/roiaware_pool3d.cpp', + 'src/roiaware_pool3d_kernel.cu', + ] + ), + make_cuda_ext( + name='roipoint_pool3d_cuda', + module='pcdet.ops.roipoint_pool3d', + sources=[ + 'src/roipoint_pool3d.cpp', + 'src/roipoint_pool3d_kernel.cu', + ] + ), + make_cuda_ext( + name='pointnet2_stack_cuda', + module='pcdet.ops.pointnet2.pointnet2_stack', + sources=[ + 'src/pointnet2_api.cpp', + 'src/ball_query.cpp', + 'src/ball_query_gpu.cu', + 'src/group_points.cpp', + 'src/group_points_gpu.cu', + 'src/sampling.cpp', + 'src/sampling_gpu.cu', + 'src/interpolate.cpp', + 'src/interpolate_gpu.cu', + 'src/voxel_query.cpp', + 'src/voxel_query_gpu.cu', + 'src/vector_pool.cpp', + 'src/vector_pool_gpu.cu' + ], + ), + make_cuda_ext( + name='pointnet2_batch_cuda', + module='pcdet.ops.pointnet2.pointnet2_batch', + sources=[ + 'src/pointnet2_api.cpp', + 'src/ball_query.cpp', + 'src/ball_query_gpu.cu', + 'src/group_points.cpp', + 'src/group_points_gpu.cu', + 'src/interpolate.cpp', + 'src/interpolate_gpu.cu', + 'src/sampling.cpp', + 'src/sampling_gpu.cu', + + ], + ), + make_cuda_ext( + name="bev_pool_ext", + module="pcdet.ops.bev_pool", + sources=[ + "src/bev_pool.cpp", + "src/bev_pool_cuda.cu", + ], + ), + make_cuda_ext( + name='ingroup_inds_cuda', + module='pcdet.ops.ingroup_inds', + sources=[ + 'src/ingroup_inds.cpp', + 'src/ingroup_inds_kernel.cu', + ] + ), + ], + ) diff --git a/toolbox/openpcdet/tools/_init_path.py b/toolbox/openpcdet/tools/_init_path.py new file mode 100644 index 000000000..9fc2af401 --- /dev/null +++ b/toolbox/openpcdet/tools/_init_path.py @@ -0,0 +1,2 @@ +import sys +sys.path.insert(0, '../') \ No newline at end of file diff --git a/toolbox/openpcdet/tools/demo.py b/toolbox/openpcdet/tools/demo.py new file mode 100644 index 000000000..259d469f3 --- /dev/null +++ b/toolbox/openpcdet/tools/demo.py @@ -0,0 +1,112 @@ +import argparse +import glob +from pathlib import Path + +try: + import open3d + from visual_utils import open3d_vis_utils as V + OPEN3D_FLAG = True +except: + import mayavi.mlab as mlab + from visual_utils import visualize_utils as V + OPEN3D_FLAG = False + +import numpy as np 
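+# Editor's note (not part of the original patch): illustrative invocation, with
+# placeholder paths and a directory of KITTI-format .bin point clouds:
+#   python demo.py --cfg_file cfgs/kitti_models/second.yaml \
+#       --ckpt second_trained.pth --data_path demo_data/
+# If open3d imports successfully the scenes render via open3d_vis_utils; otherwise
+# the script falls back to mayavi and mlab.show(stop=True) blocks on each frame.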
+import torch + +from pcdet.config import cfg, cfg_from_yaml_file +from pcdet.datasets import DatasetTemplate +from pcdet.models import build_network, load_data_to_gpu +from pcdet.utils import common_utils + + +class DemoDataset(DatasetTemplate): + def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'): + """ + Args: + root_path: + dataset_cfg: + class_names: + training: + logger: + """ + super().__init__( + dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger + ) + self.root_path = root_path + self.ext = ext + data_file_list = glob.glob(str(root_path / f'*{self.ext}')) if self.root_path.is_dir() else [self.root_path] + + data_file_list.sort() + self.sample_file_list = data_file_list + + def __len__(self): + return len(self.sample_file_list) + + def __getitem__(self, index): + if self.ext == '.bin': + points = np.fromfile(self.sample_file_list[index], dtype=np.float32).reshape(-1, 4) + elif self.ext == '.npy': + points = np.load(self.sample_file_list[index]) + else: + raise NotImplementedError + + input_dict = { + 'points': points, + 'frame_id': index, + } + + data_dict = self.prepare_data(data_dict=input_dict) + return data_dict + + +def parse_config(): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default='cfgs/kitti_models/second.yaml', + help='specify the config for demo') + parser.add_argument('--data_path', type=str, default='demo_data', + help='specify the point cloud data file or directory') + parser.add_argument('--ckpt', type=str, default=None, help='specify the pretrained model') + parser.add_argument('--ext', type=str, default='.bin', help='specify the extension of your point cloud data file') + + args = parser.parse_args() + + cfg_from_yaml_file(args.cfg_file, cfg) + + return args, cfg + + +def main(): + args, cfg = parse_config() + logger = common_utils.create_logger() + logger.info('-----------------Quick Demo of OpenPCDet-------------------------') + demo_dataset = DemoDataset( + dataset_cfg=cfg.DATA_CONFIG, class_names=cfg.CLASS_NAMES, training=False, + root_path=Path(args.data_path), ext=args.ext, logger=logger + ) + logger.info(f'Total number of samples: \t{len(demo_dataset)}') + + model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=demo_dataset) + model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=True) + model.cuda() + model.eval() + with torch.no_grad(): + for idx, data_dict in enumerate(demo_dataset): + logger.info(f'Visualized sample index: \t{idx + 1}') + data_dict = demo_dataset.collate_batch([data_dict]) + load_data_to_gpu(data_dict) + pred_dicts, _ = model.forward(data_dict) + + V.draw_scenes( + points=data_dict['points'][:, 1:], ref_boxes=pred_dicts[0]['pred_boxes'], + ref_scores=pred_dicts[0]['pred_scores'], ref_labels=pred_dicts[0]['pred_labels'] + ) + + if not OPEN3D_FLAG: + mlab.show(stop=True) + + logger.info('Demo done.') + + +if __name__ == '__main__': + main() diff --git a/toolbox/openpcdet/tools/eval_utils/eval_utils.py b/toolbox/openpcdet/tools/eval_utils/eval_utils.py new file mode 100644 index 000000000..92abd2551 --- /dev/null +++ b/toolbox/openpcdet/tools/eval_utils/eval_utils.py @@ -0,0 +1,140 @@ +import pickle +import time + +import numpy as np +import torch +import tqdm + +from pcdet.models import load_data_to_gpu +from pcdet.utils import common_utils + + +def statistics_info(cfg, ret_dict, metric, disp_dict): + for cur_thresh in 
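+ # Editor's note (not part of the original patch): for each threshold t in
+ # RECALL_THRESH_LIST (e.g. [0.3, 0.5, 0.7]), ret_dict reports how many ground-truth
+ # boxes were recalled by the first-stage RoIs ('roi_t') and by the refined boxes
+ # ('rcnn_t'); this function only accumulates those counters plus 'gt_num', and
+ # eval_one_epoch divides by the accumulated gt count at the end to report recall.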
cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST: + metric['recall_roi_%s' % str(cur_thresh)] += ret_dict.get('roi_%s' % str(cur_thresh), 0) + metric['recall_rcnn_%s' % str(cur_thresh)] += ret_dict.get('rcnn_%s' % str(cur_thresh), 0) + metric['gt_num'] += ret_dict.get('gt', 0) + min_thresh = cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST[0] + disp_dict['recall_%s' % str(min_thresh)] = \ + '(%d, %d) / %d' % (metric['recall_roi_%s' % str(min_thresh)], metric['recall_rcnn_%s' % str(min_thresh)], metric['gt_num']) + + +def eval_one_epoch(cfg, args, model, dataloader, epoch_id, logger, dist_test=False, result_dir=None): + result_dir.mkdir(parents=True, exist_ok=True) + + final_output_dir = result_dir / 'final_result' / 'data' + if args.save_to_file: + final_output_dir.mkdir(parents=True, exist_ok=True) + + metric = { + 'gt_num': 0, + } + for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST: + metric['recall_roi_%s' % str(cur_thresh)] = 0 + metric['recall_rcnn_%s' % str(cur_thresh)] = 0 + + dataset = dataloader.dataset + class_names = dataset.class_names + det_annos = [] + + if getattr(args, 'infer_time', False): + start_iter = int(len(dataloader) * 0.1) + infer_time_meter = common_utils.AverageMeter() + + logger.info('*************** EPOCH %s EVALUATION *****************' % epoch_id) + if dist_test: + num_gpus = torch.cuda.device_count() + local_rank = cfg.LOCAL_RANK % num_gpus + model = torch.nn.parallel.DistributedDataParallel( + model, + device_ids=[local_rank], + broadcast_buffers=False + ) + model.eval() + + if cfg.LOCAL_RANK == 0: + progress_bar = tqdm.tqdm(total=len(dataloader), leave=True, desc='eval', dynamic_ncols=True) + start_time = time.time() + for i, batch_dict in enumerate(dataloader): + load_data_to_gpu(batch_dict) + + if getattr(args, 'infer_time', False): + start_time = time.time() + + with torch.no_grad(): + pred_dicts, ret_dict = model(batch_dict) + + disp_dict = {} + + if getattr(args, 'infer_time', False): + inference_time = time.time() - start_time + infer_time_meter.update(inference_time * 1000) + # use ms to measure inference time + disp_dict['infer_time'] = f'{infer_time_meter.val:.2f}({infer_time_meter.avg:.2f})' + + statistics_info(cfg, ret_dict, metric, disp_dict) + annos = dataset.generate_prediction_dicts( + batch_dict, pred_dicts, class_names, + output_path=final_output_dir if args.save_to_file else None + ) + det_annos += annos + if cfg.LOCAL_RANK == 0: + progress_bar.set_postfix(disp_dict) + progress_bar.update() + + if cfg.LOCAL_RANK == 0: + progress_bar.close() + + if dist_test: + rank, world_size = common_utils.get_dist_info() + det_annos = common_utils.merge_results_dist(det_annos, len(dataset), tmpdir=result_dir / 'tmpdir') + metric = common_utils.merge_results_dist([metric], world_size, tmpdir=result_dir / 'tmpdir') + + logger.info('*************** Performance of EPOCH %s *****************' % epoch_id) + sec_per_example = (time.time() - start_time) / len(dataloader.dataset) + logger.info('Generate label finished(sec_per_example: %.4f second).' 
% sec_per_example) + + if cfg.LOCAL_RANK != 0: + return {} + + ret_dict = {} + if dist_test: + for key, val in metric[0].items(): + for k in range(1, world_size): + metric[0][key] += metric[k][key] + metric = metric[0] + + gt_num_cnt = metric['gt_num'] + for cur_thresh in cfg.MODEL.POST_PROCESSING.RECALL_THRESH_LIST: + cur_roi_recall = metric['recall_roi_%s' % str(cur_thresh)] / max(gt_num_cnt, 1) + cur_rcnn_recall = metric['recall_rcnn_%s' % str(cur_thresh)] / max(gt_num_cnt, 1) + logger.info('recall_roi_%s: %f' % (cur_thresh, cur_roi_recall)) + logger.info('recall_rcnn_%s: %f' % (cur_thresh, cur_rcnn_recall)) + ret_dict['recall/roi_%s' % str(cur_thresh)] = cur_roi_recall + ret_dict['recall/rcnn_%s' % str(cur_thresh)] = cur_rcnn_recall + + total_pred_objects = 0 + for anno in det_annos: + total_pred_objects += anno['name'].__len__() + logger.info('Average predicted number of objects(%d samples): %.3f' + % (len(det_annos), total_pred_objects / max(1, len(det_annos)))) + + with open(result_dir / 'result.pkl', 'wb') as f: + pickle.dump(det_annos, f) + + result_str, result_dict = dataset.evaluation( + det_annos, class_names, + eval_metric=cfg.MODEL.POST_PROCESSING.EVAL_METRIC, + output_path=final_output_dir + ) + + logger.info(result_str) + ret_dict.update(result_dict) + + logger.info('Result is saved to %s' % result_dir) + logger.info('****************Evaluation done.*****************') + return ret_dict + + +if __name__ == '__main__': + pass diff --git a/toolbox/openpcdet/tools/process_tools/create_integrated_database.py b/toolbox/openpcdet/tools/process_tools/create_integrated_database.py new file mode 100644 index 000000000..59f7b5381 --- /dev/null +++ b/toolbox/openpcdet/tools/process_tools/create_integrated_database.py @@ -0,0 +1,86 @@ +import numpy as np +import pickle as pkl +from pathlib import Path +import tqdm +import copy +import os + + +def create_integrated_db_with_infos(args, root_path): + """ + Args: + args: + Returns: + + """ + # prepare + db_infos_path = args.src_db_info + db_info_global_path = db_infos_path + global_db_path = root_path / (args.new_db_name + '.npy') + + db_infos = pkl.load(open(db_infos_path, 'rb')) + db_info_global = copy.deepcopy(db_infos) + start_idx = 0 + global_db_list = [] + + for category, class_info in db_infos.items(): + print('>>> Start processing %s' % category) + for idx, info in tqdm.tqdm(enumerate(class_info), total=len(class_info)): + obj_path = root_path / info['path'] + obj_points = np.fromfile(str(obj_path), dtype=np.float32).reshape( + [-1, args.num_point_features]) + num_points = obj_points.shape[0] + if num_points != info['num_points_in_gt']: + obj_points = np.fromfile(str(obj_path), dtype=np.float64).reshape([-1, args.num_point_features]) + num_points = obj_points.shape[0] + obj_points = obj_points.astype(np.float32) + assert num_points == info['num_points_in_gt'] + + db_info_global[category][idx]['global_data_offset'] = (start_idx, start_idx + num_points) + start_idx += num_points + global_db_list.append(obj_points) + + global_db = np.concatenate(global_db_list) + + with open(global_db_path, 'wb') as f: + np.save(f, global_db) + + with open(db_info_global_path, 'wb') as f: + pkl.dump(db_info_global, f) + + print(f"Successfully create integrated database at {global_db_path}") + print(f"Successfully create integrated database info at {db_info_global_path}") + + return db_info_global, global_db + + +def verify(info, whole_db, root_path, num_point_features): + obj_path = root_path / info['path'] + obj_points = np.fromfile(str(obj_path), 
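+ # Editor's note (not part of the original patch): verify() re-reads one object's
+ # points from its original per-object file and compares their mean against the
+ # slice whole_db[start:end] recorded under 'global_data_offset', confirming the
+ # concatenated .npy database preserves the point data exactly.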
dtype=np.float32).reshape([-1, num_point_features]) + mean_origin = obj_points.mean() + + start_idx, end_idx = info['global_data_offset'] + obj_points_new = whole_db[start_idx:end_idx] + mean_new = obj_points_new.mean() + + assert mean_origin == mean_new + + print("Verification pass!") + + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--src_db_info', type=str, default='../../data/waymo/waymo_processed_data_v0_5_0_waymo_dbinfos_train_sampled_1_multiframe_-4_to_0_tail_parallel.pkl', help='') + parser.add_argument('--new_db_name', type=str, default='waymo_processed_data_v0_5_0_gt_database_train_sampled_1_multiframe_-4_to_0_tail_parallel_global', help='') + parser.add_argument('--num_point_features', type=int, default=6, help='number of feature channels for points') + parser.add_argument('--class_name', type=str, default='Vehicle', help='category name for verification') + + args = parser.parse_args() + + root_path = Path(os.path.dirname(args.src_db_info)) + + db_infos_global, whole_db = create_integrated_db_with_infos(args, root_path) + # simple verify + verify(db_infos_global[args.class_name][0], whole_db, root_path, args.num_point_features) diff --git a/toolbox/openpcdet/tools/scripts/slurm_test_mgpu.sh b/toolbox/openpcdet/tools/scripts/slurm_test_mgpu.sh new file mode 100755 index 000000000..7e1a1d687 --- /dev/null +++ b/toolbox/openpcdet/tools/scripts/slurm_test_mgpu.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +GPUS=$2 +GPUS_PER_NODE=$GPUS +PY_ARGS=${@:3} +JOB_NAME=eval +SRUN_ARGS=${SRUN_ARGS:-""} + +while true +do + PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) + status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" + if [ "${status}" != "0" ]; then + break; + fi +done +echo $PORT + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u test.py --launcher slurm --tcp_port $PORT ${PY_ARGS} + diff --git a/toolbox/openpcdet/tools/scripts/slurm_test_single.sh b/toolbox/openpcdet/tools/scripts/slurm_test_single.sh new file mode 100644 index 000000000..2dea3ceaf --- /dev/null +++ b/toolbox/openpcdet/tools/scripts/slurm_test_single.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +GPUS=1 +GPUS_PER_NODE=1 +PY_ARGS=${@:2} +JOB_NAME=eval +SRUN_ARGS=${SRUN_ARGS:-""} + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u test.py ${PY_ARGS} diff --git a/toolbox/openpcdet/tools/scripts/slurm_train.sh b/toolbox/openpcdet/tools/scripts/slurm_train.sh new file mode 100644 index 000000000..7a4533f06 --- /dev/null +++ b/toolbox/openpcdet/tools/scripts/slurm_train.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +GPUS=$3 +PY_ARGS=${@:4} + +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} + +while true +do + PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) + status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" + if [ "${status}" != "0" ]; then + break; + fi +done +echo $PORT + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ 
+ python -u train.py --launcher slurm --tcp_port $PORT ${PY_ARGS} diff --git a/toolbox/openpcdet/tools/scripts/slurm_train_v2.sh b/toolbox/openpcdet/tools/scripts/slurm_train_v2.sh new file mode 100644 index 000000000..d5ec0ed2f --- /dev/null +++ b/toolbox/openpcdet/tools/scripts/slurm_train_v2.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +GPUS=$3 +PY_ARGS=${@:4} + +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-40} +SRUN_ARGS=${SRUN_ARGS:-""} + +while true +do + PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) + status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" + if [ "${status}" != "0" ]; then + break; + fi +done +echo $PORT + +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -m torch.distributed.launch --nproc_per_node=${GPUS_PER_NODE} train.py --launcher pytorch --tcp_port ${PORT} ${PY_ARGS} diff --git a/toolbox/openpcdet/tools/scripts/torch_train.sh b/toolbox/openpcdet/tools/scripts/torch_train.sh new file mode 100644 index 000000000..59410491c --- /dev/null +++ b/toolbox/openpcdet/tools/scripts/torch_train.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -x +NGPUS=$1 +PY_ARGS=${@:2} + +while true +do + PORT=$(( ((RANDOM<<15)|RANDOM) % 49152 + 10000 )) + status="$(nc -z 127.0.0.1 $PORT < /dev/null &>/dev/null; echo $?)" + if [ "${status}" != "0" ]; then + break; + fi +done +echo $PORT + +torchrun --nproc_per_node=${NGPUS} --rdzv_endpoint=localhost:${PORT} train.py --launcher pytorch ${PY_ARGS} + diff --git a/toolbox/openpcdet/tools/test.py b/toolbox/openpcdet/tools/test.py new file mode 100644 index 000000000..51b7178c6 --- /dev/null +++ b/toolbox/openpcdet/tools/test.py @@ -0,0 +1,207 @@ +import _init_path +import argparse +import datetime +import glob +import os +import re +import time +from pathlib import Path + +import numpy as np +import torch +from tensorboardX import SummaryWriter + +from eval_utils import eval_utils +from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file +from pcdet.datasets import build_dataloader +from pcdet.models import build_network +from pcdet.utils import common_utils + + +def parse_config(): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training') + + parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training') + parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader') + parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment') + parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from') + parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model') + parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none') + parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training') + parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training') + parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER, + help='set extra config keys if needed') + + parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes') + parser.add_argument('--start_epoch', type=int, default=0, help='') + 
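+ # Editor's note (not part of the original patch): illustrative usage with
+ # placeholder paths. Evaluate a single checkpoint:
+ #   python test.py --cfg_file cfgs/kitti_models/second.yaml --batch_size 8 \
+ #       --ckpt output/kitti_models/second/default/ckpt/checkpoint_epoch_80.pth
+ # Or poll a checkpoint directory while training runs:
+ #   python test.py --cfg_file cfgs/kitti_models/second.yaml --eval_all --max_waiting_mins 30
+ # --eval_all drives repeat_eval_ckpt below, which records finished epochs in
+ # eval_list_<split>.txt so no checkpoint is evaluated twice.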
parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment') + parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints') + parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed') + parser.add_argument('--save_to_file', action='store_true', default=False, help='') + parser.add_argument('--infer_time', action='store_true', default=False, help='calculate inference latency') + + args = parser.parse_args() + + cfg_from_yaml_file(args.cfg_file, cfg) + cfg.TAG = Path(args.cfg_file).stem + cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml' + + np.random.seed(1024) + + if args.set_cfgs is not None: + cfg_from_list(args.set_cfgs, cfg) + + return args, cfg + + +def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False): + # load checkpoint + model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test, + pre_trained_path=args.pretrained_model) + model.cuda() + + # start evaluation + eval_utils.eval_one_epoch( + cfg, args, model, test_loader, epoch_id, logger, dist_test=dist_test, + result_dir=eval_output_dir + ) + + +def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args): + ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth')) + ckpt_list.sort(key=os.path.getmtime) + evaluated_ckpt_list = [float(x.strip()) for x in open(ckpt_record_file, 'r').readlines()] + + for cur_ckpt in ckpt_list: + num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt) + if num_list.__len__() == 0: + continue + + epoch_id = num_list[-1] + if 'optim' in epoch_id: + continue + if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch: + return epoch_id, cur_ckpt + return -1, None + + +def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False): + # evaluated ckpt record + ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test']) + with open(ckpt_record_file, 'a'): + pass + + # tensorboard log + if cfg.LOCAL_RANK == 0: + tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test']))) + total_time = 0 + first_eval = True + + while True: + # check whether there is checkpoint which is not evaluated + cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args) + if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch: + wait_second = 30 + if cfg.LOCAL_RANK == 0: + print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r' + % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True) + time.sleep(wait_second) + total_time += 30 + if total_time > args.max_waiting_mins * 60 and (first_eval is False): + break + continue + + total_time = 0 + first_eval = False + + model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test) + model.cuda() + + # start evaluation + cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test'] + tb_dict = eval_utils.eval_one_epoch( + cfg, args, model, test_loader, cur_epoch_id, logger, dist_test=dist_test, + result_dir=cur_result_dir + ) + + if cfg.LOCAL_RANK == 0: + for key, val in tb_dict.items(): + tb_log.add_scalar(key, val, cur_epoch_id) + + # record this epoch which has been evaluated + with open(ckpt_record_file, 'a') as f: + 
print('%s' % cur_epoch_id, file=f) + logger.info('Epoch %s has been evaluated' % cur_epoch_id) + + +def main(): + args, cfg = parse_config() + + if args.infer_time: + os.environ['CUDA_LAUNCH_BLOCKING'] = '1' + + if args.launcher == 'none': + dist_test = False + total_gpus = 1 + else: + total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)( + args.tcp_port, args.local_rank, backend='nccl' + ) + dist_test = True + + if args.batch_size is None: + args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU + else: + assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus' + args.batch_size = args.batch_size // total_gpus + + output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag + output_dir.mkdir(parents=True, exist_ok=True) + + eval_output_dir = output_dir / 'eval' + + if not args.eval_all: + num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else [] + epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number' + eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test'] + else: + eval_output_dir = eval_output_dir / 'eval_all_default' + + if args.eval_tag is not None: + eval_output_dir = eval_output_dir / args.eval_tag + + eval_output_dir.mkdir(parents=True, exist_ok=True) + log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) + logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK) + + # log to file + logger.info('**********************Start logging**********************') + gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL' + logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list) + + if dist_test: + logger.info('total_batch_size: %d' % (total_gpus * args.batch_size)) + for key, val in vars(args).items(): + logger.info('{:16} {}'.format(key, val)) + log_config_to_file(cfg, logger=logger) + + ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt' + + test_set, test_loader, sampler = build_dataloader( + dataset_cfg=cfg.DATA_CONFIG, + class_names=cfg.CLASS_NAMES, + batch_size=args.batch_size, + dist=dist_test, workers=args.workers, logger=logger, training=False + ) + + model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set) + with torch.no_grad(): + if args.eval_all: + repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test) + else: + eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test) + + +if __name__ == '__main__': + main() diff --git a/toolbox/openpcdet/tools/train.py b/toolbox/openpcdet/tools/train.py new file mode 100644 index 000000000..54e524086 --- /dev/null +++ b/toolbox/openpcdet/tools/train.py @@ -0,0 +1,230 @@ +import _init_path +import argparse +import datetime +import glob +import os +from pathlib import Path +from test import repeat_eval_ckpt + +import torch +import torch.nn as nn +from tensorboardX import SummaryWriter + +from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file +from pcdet.datasets import build_dataloader +from pcdet.models import build_network, model_fn_decorator +from pcdet.utils import common_utils +from train_utils.optimization import build_optimizer, build_scheduler +from train_utils.train_utils import train_model + + +def parse_config(): + parser = argparse.ArgumentParser(description='arg parser') + parser.add_argument('--cfg_file', 
type=str, default=None, help='specify the config for training')
+
+    parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
+    parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
+    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
+    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
+    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
+    parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
+    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
+    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
+    parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
+    parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
+    parser.add_argument('--ckpt_save_interval', type=int, default=1, help='save a checkpoint every this many epochs')
+    parser.add_argument('--local-rank', type=int, default=0, help='local rank for distributed training')
+    parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoints')
+    parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
+    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
+                        help='set extra config keys if needed')
+
+    parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
+    parser.add_argument('--start_epoch', type=int, default=0, help='')
+    parser.add_argument('--num_epochs_to_eval', type=int, default=0, help='number of checkpoints to be evaluated')
+    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
+
+    parser.add_argument('--use_tqdm_to_record', action='store_true', default=False, help='if True, the intermediate losses will not be logged to file, only tqdm will be used')
+    parser.add_argument('--logger_iter_interval', type=int, default=50, help='')
+    parser.add_argument('--ckpt_save_time_interval', type=int, default=300, help='in terms of seconds')
+    parser.add_argument('--wo_gpu_stat', action='store_true', help='')
+    parser.add_argument('--use_amp', action='store_true', help='use mixed precision training')
+
+
+    args = parser.parse_args()
+
+    cfg_from_yaml_file(args.cfg_file, cfg)
+    cfg.TAG = Path(args.cfg_file).stem
+    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
+
+    args.use_amp = args.use_amp or cfg.OPTIMIZATION.get('USE_AMP', False)
+
+    if args.set_cfgs is not None:
+        cfg_from_list(args.set_cfgs, cfg)
+
+    return args, cfg
+
+
+def main():
+    args, cfg = parse_config()
+    if args.launcher == 'none':
+        dist_train = False
+        total_gpus = 1
+    else:
+        total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
+            args.tcp_port, args.local_rank, backend='nccl'
+        )
+        dist_train = True
+
+    if args.batch_size is None:
+        args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
+    else:
+        assert args.batch_size % total_gpus == 0, 'Batch size should match the number of gpus'
+        args.batch_size = args.batch_size // total_gpus
+
+    args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
+
+    if args.fix_random_seed:
+        common_utils.set_random_seed(666 + cfg.LOCAL_RANK)
+
+    output_dir =
cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag + ckpt_dir = output_dir / 'ckpt' + output_dir.mkdir(parents=True, exist_ok=True) + ckpt_dir.mkdir(parents=True, exist_ok=True) + + log_file = output_dir / ('train_%s.log' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) + logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK) + + # log to file + logger.info('**********************Start logging**********************') + gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL' + logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list) + + if dist_train: + logger.info('Training in distributed mode : total_batch_size: %d' % (total_gpus * args.batch_size)) + else: + logger.info('Training with a single process') + + for key, val in vars(args).items(): + logger.info('{:16} {}'.format(key, val)) + log_config_to_file(cfg, logger=logger) + if cfg.LOCAL_RANK == 0: + os.system('cp %s %s' % (args.cfg_file, output_dir)) + + tb_log = SummaryWriter(log_dir=str(output_dir / 'tensorboard')) if cfg.LOCAL_RANK == 0 else None + + logger.info("----------- Create dataloader & network & optimizer -----------") + train_set, train_loader, train_sampler = build_dataloader( + dataset_cfg=cfg.DATA_CONFIG, + class_names=cfg.CLASS_NAMES, + batch_size=args.batch_size, + dist=dist_train, workers=args.workers, + logger=logger, + training=True, + merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, + total_epochs=args.epochs, + seed=666 if args.fix_random_seed else None + ) + + model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set) + if args.sync_bn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model.cuda() + + optimizer = build_optimizer(model, cfg.OPTIMIZATION) + + # load checkpoint if it is possible + start_epoch = it = 0 + last_epoch = -1 + if args.pretrained_model is not None: + model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger) + + if args.ckpt is not None: + it, start_epoch = model.load_params_with_optimizer(args.ckpt, to_cpu=dist_train, optimizer=optimizer, logger=logger) + last_epoch = start_epoch + 1 + else: + ckpt_list = glob.glob(str(ckpt_dir / '*.pth')) + + if len(ckpt_list) > 0: + ckpt_list.sort(key=os.path.getmtime) + while len(ckpt_list) > 0: + try: + it, start_epoch = model.load_params_with_optimizer( + ckpt_list[-1], to_cpu=dist_train, optimizer=optimizer, logger=logger + ) + last_epoch = start_epoch + 1 + break + except: + ckpt_list = ckpt_list[:-1] + + model.train() # before wrap to DistributedDataParallel to support fixed some parameters + if dist_train: + model = nn.parallel.DistributedDataParallel(model, device_ids=[cfg.LOCAL_RANK % torch.cuda.device_count()]) + logger.info(f'----------- Model {cfg.MODEL.NAME} created, param count: {sum([m.numel() for m in model.parameters()])} -----------') + logger.info(model) + + lr_scheduler, lr_warmup_scheduler = build_scheduler( + optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs, + last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION + ) + + # -----------------------start training--------------------------- + logger.info('**********************Start training %s/%s(%s)**********************' + % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) + + train_model( + model, + optimizer, + train_loader, + model_func=model_fn_decorator(), + lr_scheduler=lr_scheduler, + optim_cfg=cfg.OPTIMIZATION, + start_epoch=start_epoch, + total_epochs=args.epochs, + 
start_iter=it, + rank=cfg.LOCAL_RANK, + tb_log=tb_log, + ckpt_save_dir=ckpt_dir, + train_sampler=train_sampler, + lr_warmup_scheduler=lr_warmup_scheduler, + ckpt_save_interval=args.ckpt_save_interval, + max_ckpt_save_num=args.max_ckpt_save_num, + merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch, + logger=logger, + logger_iter_interval=args.logger_iter_interval, + ckpt_save_time_interval=args.ckpt_save_time_interval, + use_logger_to_record=not args.use_tqdm_to_record, + show_gpu_stat=not args.wo_gpu_stat, + use_amp=args.use_amp, + cfg=cfg + ) + + if hasattr(train_set, 'use_shared_memory') and train_set.use_shared_memory: + train_set.clean_shared_memory() + + logger.info('**********************End training %s/%s(%s)**********************\n\n\n' + % (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) + + logger.info('**********************Start evaluation %s/%s(%s)**********************' % + (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) + test_set, test_loader, sampler = build_dataloader( + dataset_cfg=cfg.DATA_CONFIG, + class_names=cfg.CLASS_NAMES, + batch_size=args.batch_size, + dist=dist_train, workers=args.workers, logger=logger, training=False + ) + eval_output_dir = output_dir / 'eval' / 'eval_with_train' + eval_output_dir.mkdir(parents=True, exist_ok=True) + args.start_epoch = max(args.epochs - args.num_epochs_to_eval, 0) # Only evaluate the last args.num_epochs_to_eval epochs + + repeat_eval_ckpt( + model.module if dist_train else model, + test_loader, args, eval_output_dir, logger, ckpt_dir, + dist_test=dist_train + ) + logger.info('**********************End evaluation %s/%s(%s)**********************' % + (cfg.EXP_GROUP_PATH, cfg.TAG, args.extra_tag)) + + +if __name__ == '__main__': + main() diff --git a/toolbox/openpcdet/tools/train_utils/optimization/__init__.py b/toolbox/openpcdet/tools/train_utils/optimization/__init__.py new file mode 100644 index 000000000..888cfcf20 --- /dev/null +++ b/toolbox/openpcdet/tools/train_utils/optimization/__init__.py @@ -0,0 +1,68 @@ +from functools import partial + +import torch.nn as nn +import torch.optim as optim +import torch.optim.lr_scheduler as lr_sched + +from .fastai_optim import OptimWrapper +from .learning_schedules_fastai import CosineWarmupLR, OneCycle, CosineAnnealing + + +def build_optimizer(model, optim_cfg): + if optim_cfg.OPTIMIZER == 'adam': + optimizer = optim.Adam(model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY) + elif optim_cfg.OPTIMIZER == 'sgd': + optimizer = optim.SGD( + model.parameters(), lr=optim_cfg.LR, weight_decay=optim_cfg.WEIGHT_DECAY, + momentum=optim_cfg.MOMENTUM + ) + elif optim_cfg.OPTIMIZER in ['adam_onecycle','adam_cosineanneal']: + def children(m: nn.Module): + return list(m.children()) + + def num_children(m: nn.Module) -> int: + return len(children(m)) + + flatten_model = lambda m: sum(map(flatten_model, m.children()), []) if num_children(m) else [m] + get_layer_groups = lambda m: [nn.Sequential(*flatten_model(m))] + betas = optim_cfg.get('BETAS', (0.9, 0.99)) + betas = tuple(betas) + optimizer_func = partial(optim.Adam, betas=betas) + optimizer = OptimWrapper.create( + optimizer_func, 3e-3, get_layer_groups(model), wd=optim_cfg.WEIGHT_DECAY, true_wd=True, bn_wd=True + ) + else: + raise NotImplementedError + + return optimizer + + +def build_scheduler(optimizer, total_iters_each_epoch, total_epochs, last_epoch, optim_cfg): + decay_steps = [x * total_iters_each_epoch for x in optim_cfg.DECAY_STEP_LIST] + def lr_lbmd(cur_epoch): + cur_decay = 1 + for decay_step in 
decay_steps:
+            if cur_epoch >= decay_step:
+                cur_decay = cur_decay * optim_cfg.LR_DECAY
+        return max(cur_decay, optim_cfg.LR_CLIP / optim_cfg.LR)
+
+    lr_warmup_scheduler = None
+    total_steps = total_iters_each_epoch * total_epochs
+    if optim_cfg.OPTIMIZER == 'adam_onecycle':
+        lr_scheduler = OneCycle(
+            optimizer, total_steps, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.DIV_FACTOR, optim_cfg.PCT_START
+        )
+    elif optim_cfg.OPTIMIZER == 'adam_cosineanneal':
+        lr_scheduler = CosineAnnealing(
+            optimizer, total_steps, total_epochs, optim_cfg.LR, list(optim_cfg.MOMS), optim_cfg.PCT_START, optim_cfg.WARMUP_ITER
+        )
+    else:
+        lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
+
+    if optim_cfg.LR_WARMUP:
+        lr_warmup_scheduler = CosineWarmupLR(
+            optimizer, T_max=optim_cfg.WARMUP_EPOCH * total_iters_each_epoch,
+            eta_min=optim_cfg.LR / optim_cfg.DIV_FACTOR
+        )
+
+    return lr_scheduler, lr_warmup_scheduler
diff --git a/toolbox/openpcdet/tools/train_utils/optimization/fastai_optim.py b/toolbox/openpcdet/tools/train_utils/optimization/fastai_optim.py
new file mode 100644
index 000000000..62909df40
--- /dev/null
+++ b/toolbox/openpcdet/tools/train_utils/optimization/fastai_optim.py
@@ -0,0 +1,264 @@
+# This file is modified from https://github.com/traveller59/second.pytorch
+
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
+
+import torch
+from torch import nn
+from torch._utils import _unflatten_dense_tensors
+from torch.nn.utils import parameters_to_vector
+
+bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
+
+
+def split_bn_bias(layer_groups):
+    "Split the layers in `layer_groups` into batchnorm (`bn_types`) and non-batchnorm groups."
+    split_groups = []
+    for l in layer_groups:
+        l1, l2 = [], []
+        for c in l.children():
+            if isinstance(c, bn_types):
+                l2.append(c)
+            else:
+                l1.append(c)
+        split_groups += [nn.Sequential(*l1), nn.Sequential(*l2)]
+    return split_groups
+
+
+def get_master(layer_groups, flat_master: bool = False):
+    "Return two lists, one for the model parameters in FP16 and one for the master parameters in FP32."
+    split_groups = split_bn_bias(layer_groups)
+    model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in split_groups]
+    if flat_master:
+        master_params = []
+        for lg in model_params:
+            if len(lg) != 0:
+                mp = parameters_to_vector([param.data.float() for param in lg])
+                mp = torch.nn.Parameter(mp, requires_grad=True)
+                if mp.grad is None: mp.grad = mp.new(*mp.size())
+                master_params.append([mp])
+            else:
+                master_params.append([])
+        return model_params, master_params
+    else:
+        master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
+        for mp in master_params:
+            for param in mp: param.requires_grad = True
+        return model_params, master_params
+
+
+def model_g2master_g(model_params, master_params, flat_master: bool = False) -> None:
+    "Copy the `model_params` gradients to `master_params` for the optimizer step."
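+    # Mixed-precision bookkeeping: the model keeps FP16 parameters while the optimizer
+    # steps the FP32 "master" copies created by get_master().  Each iteration the FP16
+    # grads are copied into the FP32 masters here, the optimizer steps in FP32, and
+    # master2model() below writes the updated weights back into the FP16 model.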
+ if flat_master: + for model_group, master_group in zip(model_params, master_params): + if len(master_group) != 0: + master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group])) + else: + for model_group, master_group in zip(model_params, master_params): + for model, master in zip(model_group, master_group): + if model.grad is not None: + if master.grad is None: master.grad = master.data.new(*master.data.size()) + master.grad.data.copy_(model.grad.data) + else: + master.grad = None + + +def master2model(model_params, master_params, flat_master: bool = False) -> None: + "Copy `master_params` to `model_params`." + if flat_master: + for model_group, master_group in zip(model_params, master_params): + if len(model_group) != 0: + for model, master in zip(model_group, _unflatten_dense_tensors(master_group[0].data, model_group)): + model.data.copy_(master) + else: + for model_group, master_group in zip(model_params, master_params): + for model, master in zip(model_group, master_group): model.data.copy_(master.data) + + +def listify(p=None, q=None): + "Make `p` listy and the same length as `q`." + if p is None: + p = [] + elif isinstance(p, str): + p = [p] + elif not isinstance(p, Iterable): + p = [p] + n = q if type(q) == int else len(p) if q is None else len(q) + if len(p) == 1: p = p * n + assert len(p) == n, f'List len mismatch ({len(p)} vs {n})' + return list(p) + + +def trainable_params(m: nn.Module): + "Return list of trainable params in `m`." + res = filter(lambda p: p.requires_grad, m.parameters()) + return res + + +def is_tuple(x) -> bool: return isinstance(x, tuple) + + +# copy from fastai. +class OptimWrapper(): + "Basic wrapper around `opt` to simplify hyper-parameters changes." + + def __init__(self, opt, wd, true_wd: bool = False, bn_wd: bool = True): + self.opt, self.true_wd, self.bn_wd = opt, true_wd, bn_wd + self.opt_keys = list(self.opt.param_groups[0].keys()) + self.opt_keys.remove('params') + self.read_defaults() + self.wd = wd + + @classmethod + def create(cls, opt_func, lr, + layer_groups, **kwargs): + "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`." + split_groups = split_bn_bias(layer_groups) + opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups]) + opt = cls(opt, **kwargs) + opt.lr, opt.opt_func = listify(lr, layer_groups), opt_func + return opt + + def new(self, layer_groups): + "Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters." + opt_func = getattr(self, 'opt_func', self.opt.__class__) + split_groups = split_bn_bias(layer_groups) + opt = opt_func([{'params': trainable_params(l), 'lr': 0} for l in split_groups]) + return self.create(opt_func, self.lr, layer_groups, wd=self.wd, true_wd=self.true_wd, bn_wd=self.bn_wd) + + def __repr__(self) -> str: + return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}' + + # Pytorch optimizer methods + def step(self) -> None: + "Set weight decay and step optimizer." 
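+        # A worked note on the decoupled decay below: every parameter is first shrunk
+        # in place by p <- p * (1 - wd * lr) and the groups' weight_decay is zeroed
+        # before opt.step(), so the inner optimizer never folds L2 into the gradient.
+        # E.g. lr=3e-3 and wd=0.01 multiply each weight by 1 - 3e-5 = 0.99997.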
+ # weight decay outside of optimizer step (AdamW) + if self.true_wd: + for lr, wd, pg1, pg2 in zip(self._lr, self._wd, self.opt.param_groups[::2], self.opt.param_groups[1::2]): + for p in pg1['params']: + # When some parameters are fixed: Shaoshuai Shi + if p.requires_grad is False: + continue + p.data.mul_(1 - wd * lr) + if self.bn_wd: + for p in pg2['params']: + # When some parameters are fixed: Shaoshuai Shi + if p.requires_grad is False: + continue + p.data.mul_(1 - wd * lr) + self.set_val('weight_decay', listify(0, self._wd)) + self.opt.step() + + def zero_grad(self) -> None: + "Clear optimizer gradients." + self.opt.zero_grad() + + # Passthrough to the inner opt. + def __getattr__(self, k: str): + return getattr(self.opt, k, None) + + def clear(self): + "Reset the state of the inner optimizer." + sd = self.state_dict() + sd['state'] = {} + self.load_state_dict(sd) + + # Hyperparameters as properties + @property + def lr(self) -> float: + return self._lr[-1] + + @lr.setter + def lr(self, val: float) -> None: + self._lr = self.set_val('lr', listify(val, self._lr)) + + @property + def mom(self) -> float: + return self._mom[-1] + + @mom.setter + def mom(self, val: float) -> None: + if 'momentum' in self.opt_keys: + self.set_val('momentum', listify(val, self._mom)) + elif 'betas' in self.opt_keys: + self.set_val('betas', (listify(val, self._mom), self._beta)) + self._mom = listify(val, self._mom) + + @property + def beta(self) -> float: + return None if self._beta is None else self._beta[-1] + + @beta.setter + def beta(self, val: float) -> None: + "Set beta (or alpha as makes sense for given optimizer)." + if val is None: return + if 'betas' in self.opt_keys: + self.set_val('betas', (self._mom, listify(val, self._beta))) + elif 'alpha' in self.opt_keys: + self.set_val('alpha', listify(val, self._beta)) + self._beta = listify(val, self._beta) + + @property + def wd(self) -> float: + return self._wd[-1] + + @wd.setter + def wd(self, val: float) -> None: + "Set weight decay." + if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd) + self._wd = listify(val, self._wd) + + # Helper functions + def read_defaults(self) -> None: + "Read the values inside the optimizer for the hyper-parameters." + self._beta = None + if 'lr' in self.opt_keys: self._lr = self.read_val('lr') + if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum') + if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha') + if 'betas' in self.opt_keys: self._mom, self._beta = self.read_val('betas') + if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay') + + def set_val(self, key: str, val, bn_groups: bool = True): + "Set `val` inside the optimizer dictionary at `key`." + if is_tuple(val): val = [(v1, v2) for v1, v2 in zip(*val)] + for v, pg1, pg2 in zip(val, self.opt.param_groups[::2], self.opt.param_groups[1::2]): + pg1[key] = v + if bn_groups: pg2[key] = v + return val + + def read_val(self, key: str): + "Read a hyperparameter `key` in the optimizer dictionary." + val = [pg[key] for pg in self.opt.param_groups[::2]] + if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val] + return val + + +class FastAIMixedOptim(OptimWrapper): + @classmethod + def create(cls, opt_func, lr, + layer_groups, model, flat_master=False, loss_scale=512.0, **kwargs): + "Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`." 
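+        # Static loss scaling (a descriptive note): the training loop is expected to
+        # multiply the loss by `loss_scale` (512 by default) before backward so that
+        # small FP16 gradients do not flush to zero; step() below divides the FP32
+        # master grads by the same factor, so the applied update is unscaled.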
+ opt = OptimWrapper.create(opt_func, lr, layer_groups, **kwargs) + opt.model_params, opt.master_params = get_master(layer_groups, flat_master) + opt.flat_master = flat_master + opt.loss_scale = loss_scale + opt.model = model + # Changes the optimizer so that the optimization step is done in FP32. + # opt = self.learn.opt + mom, wd, beta = opt.mom, opt.wd, opt.beta + lrs = [lr for lr in opt._lr for _ in range(2)] + opt_params = [{'params': mp, 'lr': lr} for mp, lr in zip(opt.master_params, lrs)] + opt.opt = opt_func(opt_params) + opt.mom, opt.wd, opt.beta = mom, wd, beta + return opt + + def step(self): + model_g2master_g(self.model_params, self.master_params, self.flat_master) + for group in self.master_params: + for param in group: param.grad.div_(self.loss_scale) + super(FastAIMixedOptim, self).step() + self.model.zero_grad() + # Update the params from master to model. + master2model(self.model_params, self.master_params, self.flat_master) diff --git a/toolbox/openpcdet/tools/train_utils/optimization/learning_schedules_fastai.py b/toolbox/openpcdet/tools/train_utils/optimization/learning_schedules_fastai.py new file mode 100644 index 000000000..15f7d2349 --- /dev/null +++ b/toolbox/openpcdet/tools/train_utils/optimization/learning_schedules_fastai.py @@ -0,0 +1,162 @@ +# This file is modified from https://github.com/traveller59/second.pytorch + +import math +from functools import partial + +import numpy as np +import torch.optim.lr_scheduler as lr_sched + +from .fastai_optim import OptimWrapper + + +class LRSchedulerStep(object): + def __init__(self, fai_optimizer: OptimWrapper, total_step, lr_phases, + mom_phases): + # if not isinstance(fai_optimizer, OptimWrapper): + # raise TypeError('{} is not a fastai OptimWrapper'.format( + # type(fai_optimizer).__name__)) + self.optimizer = fai_optimizer + self.total_step = total_step + self.lr_phases = [] + + for i, (start, lambda_func) in enumerate(lr_phases): + if len(self.lr_phases) != 0: + assert self.lr_phases[-1][0] < start + if isinstance(lambda_func, str): + lambda_func = eval(lambda_func) + if i < len(lr_phases) - 1: + self.lr_phases.append((int(start * total_step), int(lr_phases[i + 1][0] * total_step), lambda_func)) + else: + self.lr_phases.append((int(start * total_step), total_step, lambda_func)) + assert self.lr_phases[0][0] == 0 + self.mom_phases = [] + for i, (start, lambda_func) in enumerate(mom_phases): + if len(self.mom_phases) != 0: + assert self.mom_phases[-1][0] < start + if isinstance(lambda_func, str): + lambda_func = eval(lambda_func) + if i < len(mom_phases) - 1: + self.mom_phases.append((int(start * total_step), int(mom_phases[i + 1][0] * total_step), lambda_func)) + else: + self.mom_phases.append((int(start * total_step), total_step, lambda_func)) + assert self.mom_phases[0][0] == 0 + + def step(self, step, epoch=None): + for start, end, func in self.lr_phases: + if step >= start: + self.optimizer.lr = func((step - start) / (end - start)) + for start, end, func in self.mom_phases: + if step >= start: + self.optimizer.mom = func((step - start) / (end - start)) + + +def annealing_cos(start, end, pct): + # print(pct, start, end) + "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." 
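+    # i.e. lr(pct) = end + (start - end) / 2 * (cos(pi * pct) + 1), so pct=0 gives
+    # `start`, pct=0.5 the midpoint, and pct=1 gives `end`.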
+ cos_out = np.cos(np.pi * pct) + 1 + return end + (start - end) / 2 * cos_out + + +class OneCycle(LRSchedulerStep): + def __init__(self, fai_optimizer, total_step, lr_max, moms, div_factor, + pct_start): + self.lr_max = lr_max + self.moms = moms + self.div_factor = div_factor + self.pct_start = pct_start + a1 = int(total_step * self.pct_start) + a2 = total_step - a1 + low_lr = self.lr_max / self.div_factor + lr_phases = ((0, partial(annealing_cos, low_lr, self.lr_max)), + (self.pct_start, + partial(annealing_cos, self.lr_max, low_lr / 1e4))) + mom_phases = ((0, partial(annealing_cos, *self.moms)), + (self.pct_start, partial(annealing_cos, + *self.moms[::-1]))) + fai_optimizer.lr, fai_optimizer.mom = low_lr, self.moms[0] + super().__init__(fai_optimizer, total_step, lr_phases, mom_phases) + + +class CosineWarmupLR(lr_sched._LRScheduler): + def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1): + self.T_max = T_max + self.eta_min = eta_min + super(CosineWarmupLR, self).__init__(optimizer, last_epoch) + + def get_lr(self, epoch=None): + return [self.eta_min + (base_lr - self.eta_min) * + (1 - math.cos(math.pi * self.last_epoch / self.T_max)) / 2 + for base_lr in self.base_lrs] + + +def linear_warmup(end, lr_max, pct): + k = (1 - pct / end) * (1 - 0.33333333) + warmup_lr = lr_max * (1 - k) + return warmup_lr + + +class CosineAnnealing(LRSchedulerStep): + def __init__(self, fai_optimizer, total_step, total_epoch, lr_max, moms, pct_start, warmup_iter): + self.lr_max = lr_max + self.moms = moms + self.pct_start = pct_start + + mom_phases = ((0, partial(annealing_cos, *self.moms)), + (self.pct_start, partial(annealing_cos, + *self.moms[::-1]))) + fai_optimizer.lr, fai_optimizer.mom = lr_max, self.moms[0] + + self.optimizer = fai_optimizer + self.total_step = total_step + self.warmup_iter = warmup_iter + self.total_epoch = total_epoch + + self.mom_phases = [] + for i, (start, lambda_func) in enumerate(mom_phases): + if len(self.mom_phases) != 0: + assert self.mom_phases[-1][0] < start + if isinstance(lambda_func, str): + lambda_func = eval(lambda_func) + if i < len(mom_phases) - 1: + self.mom_phases.append((int(start * total_step), int(mom_phases[i + 1][0] * total_step), lambda_func)) + else: + self.mom_phases.append((int(start * total_step), total_step, lambda_func)) + assert self.mom_phases[0][0] == 0 + + def step(self, step, epoch): + # update lr + if step < self.warmup_iter: + self.optimizer.lr = linear_warmup(self.warmup_iter, self.lr_max, step) + else: + target_lr = self.lr_max * 0.001 + cos_lr = annealing_cos(self.lr_max, target_lr, epoch / self.total_epoch) + self.optimizer.lr = cos_lr + # update mom + for start, end, func in self.mom_phases: + if step >= start: + self.optimizer.mom = func((step - start) / (end - start)) + + +class FakeOptim: + def __init__(self): + self.lr = 0 + self.mom = 0 + + +if __name__ == "__main__": + import matplotlib.pyplot as plt + + opt = FakeOptim() # 3e-3, wd=0.4, div_factor=10 + schd = OneCycle(opt, 100, 3e-3, (0.95, 0.85), 10.0, 0.1) + + lrs = [] + moms = [] + for i in range(100): + schd.step(i) + lrs.append(opt.lr) + moms.append(opt.mom) + plt.plot(lrs) + # plt.plot(moms) + plt.show() + plt.plot(moms) + plt.show() diff --git a/toolbox/openpcdet/tools/train_utils/train_utils.py b/toolbox/openpcdet/tools/train_utils/train_utils.py new file mode 100644 index 000000000..04071fb0e --- /dev/null +++ b/toolbox/openpcdet/tools/train_utils/train_utils.py @@ -0,0 +1,272 @@ +import os + +import torch +import tqdm +import time +import glob +from 
torch.nn.utils import clip_grad_norm_ +from pcdet.utils import common_utils, commu_utils + + +def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg, + rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False, + use_logger_to_record=False, logger=None, logger_iter_interval=50, cur_epoch=None, + total_epochs=None, ckpt_save_dir=None, ckpt_save_time_interval=300, show_gpu_stat=False, use_amp=False): + if total_it_each_epoch == len(train_loader): + dataloader_iter = iter(train_loader) + + ckpt_save_cnt = 1 + start_it = accumulated_iter % total_it_each_epoch + + scaler = torch.cuda.amp.GradScaler(enabled=use_amp, init_scale=optim_cfg.get('LOSS_SCALE_FP16', 2.0**16)) + + if rank == 0: + pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True) + data_time = common_utils.AverageMeter() + batch_time = common_utils.AverageMeter() + forward_time = common_utils.AverageMeter() + losses_m = common_utils.AverageMeter() + + end = time.time() + for cur_it in range(start_it, total_it_each_epoch): + try: + batch = next(dataloader_iter) + except StopIteration: + dataloader_iter = iter(train_loader) + batch = next(dataloader_iter) + print('new iters') + + data_timer = time.time() + cur_data_time = data_timer - end + + lr_scheduler.step(accumulated_iter, cur_epoch) + + try: + cur_lr = float(optimizer.lr) + except: + cur_lr = optimizer.param_groups[0]['lr'] + + if tb_log is not None: + tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter) + + model.train() + optimizer.zero_grad() + + with torch.cuda.amp.autocast(enabled=use_amp): + loss, tb_dict, disp_dict = model_func(model, batch) + + scaler.scale(loss).backward() + scaler.unscale_(optimizer) + clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP) + scaler.step(optimizer) + scaler.update() + + accumulated_iter += 1 + + cur_forward_time = time.time() - data_timer + cur_batch_time = time.time() - end + end = time.time() + + # average reduce + avg_data_time = commu_utils.average_reduce_value(cur_data_time) + avg_forward_time = commu_utils.average_reduce_value(cur_forward_time) + avg_batch_time = commu_utils.average_reduce_value(cur_batch_time) + + # log to console and tensorboard + if rank == 0: + batch_size = batch.get('batch_size', None) + + data_time.update(avg_data_time) + forward_time.update(avg_forward_time) + batch_time.update(avg_batch_time) + losses_m.update(loss.item() , batch_size) + + disp_dict.update({ + 'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})', + 'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})' + }) + + if use_logger_to_record: + if accumulated_iter % logger_iter_interval == 0 or cur_it == start_it or cur_it + 1 == total_it_each_epoch: + trained_time_past_all = tbar.format_dict['elapsed'] + second_each_iter = pbar.format_dict['elapsed'] / max(cur_it - start_it + 1, 1.0) + + trained_time_each_epoch = pbar.format_dict['elapsed'] + remaining_second_each_epoch = second_each_iter * (total_it_each_epoch - cur_it) + remaining_second_all = second_each_iter * ((total_epochs - cur_epoch) * total_it_each_epoch - cur_it) + + logger.info( + 'Train: {:>4d}/{} ({:>3.0f}%) [{:>4d}/{} ({:>3.0f}%)] ' + 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) ' + 'LR: {lr:.3e} ' + f'Time cost: {tbar.format_interval(trained_time_each_epoch)}/{tbar.format_interval(remaining_second_each_epoch)} ' + 
f'[{tbar.format_interval(trained_time_past_all)}/{tbar.format_interval(remaining_second_all)}] ' + 'Acc_iter {acc_iter:<10d} ' + 'Data time: {data_time.val:.2f}({data_time.avg:.2f}) ' + 'Forward time: {forward_time.val:.2f}({forward_time.avg:.2f}) ' + 'Batch time: {batch_time.val:.2f}({batch_time.avg:.2f})'.format( + cur_epoch+1,total_epochs, 100. * (cur_epoch+1) / total_epochs, + cur_it,total_it_each_epoch, 100. * cur_it / total_it_each_epoch, + loss=losses_m, + lr=cur_lr, + acc_iter=accumulated_iter, + data_time=data_time, + forward_time=forward_time, + batch_time=batch_time + ) + ) + + if show_gpu_stat and accumulated_iter % (3 * logger_iter_interval) == 0: + # To show the GPU utilization, please install gpustat through "pip install gpustat" + gpu_info = os.popen('gpustat').read() + logger.info(gpu_info) + else: + pbar.update() + pbar.set_postfix(dict(total_it=accumulated_iter)) + tbar.set_postfix(disp_dict) + # tbar.refresh() + + if tb_log is not None: + tb_log.add_scalar('train/loss', loss, accumulated_iter) + tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter) + for key, val in tb_dict.items(): + tb_log.add_scalar('train/' + key, val, accumulated_iter) + + # save intermediate ckpt every {ckpt_save_time_interval} seconds + time_past_this_epoch = pbar.format_dict['elapsed'] + if time_past_this_epoch // ckpt_save_time_interval >= ckpt_save_cnt: + ckpt_name = ckpt_save_dir / 'latest_model' + save_checkpoint( + checkpoint_state(model, optimizer, cur_epoch, accumulated_iter), filename=ckpt_name, + ) + logger.info(f'Save latest model to {ckpt_name}') + ckpt_save_cnt += 1 + + if rank == 0: + pbar.close() + return accumulated_iter + + +def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg, + start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None, + lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50, + merge_all_iters_to_one_epoch=False, use_amp=False, + use_logger_to_record=False, logger=None, logger_iter_interval=None, ckpt_save_time_interval=None, show_gpu_stat=False, cfg=None): + accumulated_iter = start_iter + + # use for disable data augmentation hook + hook_config = cfg.get('HOOK', None) + augment_disable_flag = False + + with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar: + total_it_each_epoch = len(train_loader) + if merge_all_iters_to_one_epoch: + assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch') + train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs) + total_it_each_epoch = len(train_loader) // max(total_epochs, 1) + + dataloader_iter = iter(train_loader) + for cur_epoch in tbar: + if train_sampler is not None: + train_sampler.set_epoch(cur_epoch) + + # train one epoch + if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH: + cur_scheduler = lr_warmup_scheduler + else: + cur_scheduler = lr_scheduler + + augment_disable_flag = disable_augmentation_hook(hook_config, dataloader_iter, total_epochs, cur_epoch, cfg, augment_disable_flag, logger) + accumulated_iter = train_one_epoch( + model, optimizer, train_loader, model_func, + lr_scheduler=cur_scheduler, + accumulated_iter=accumulated_iter, optim_cfg=optim_cfg, + rank=rank, tbar=tbar, tb_log=tb_log, + leave_pbar=(cur_epoch + 1 == total_epochs), + total_it_each_epoch=total_it_each_epoch, + dataloader_iter=dataloader_iter, + + cur_epoch=cur_epoch, total_epochs=total_epochs, + use_logger_to_record=use_logger_to_record, 
+                logger=logger, logger_iter_interval=logger_iter_interval,
+                ckpt_save_dir=ckpt_save_dir, ckpt_save_time_interval=ckpt_save_time_interval,
+                show_gpu_stat=show_gpu_stat,
+                use_amp=use_amp
+            )
+
+            # save trained model
+            trained_epoch = cur_epoch + 1
+            if trained_epoch % ckpt_save_interval == 0 and rank == 0:
+
+                ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
+                ckpt_list.sort(key=os.path.getmtime)
+
+                if ckpt_list.__len__() >= max_ckpt_save_num:
+                    for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
+                        os.remove(ckpt_list[cur_file_idx])
+
+                ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
+                save_checkpoint(
+                    checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
+                )
+
+
+def model_state_to_cpu(model_state):
+    model_state_cpu = type(model_state)()  # ordered dict
+    for key, val in model_state.items():
+        model_state_cpu[key] = val.cpu()
+    return model_state_cpu
+
+
+def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
+    optim_state = optimizer.state_dict() if optimizer is not None else None
+    if model is not None:
+        if isinstance(model, torch.nn.parallel.DistributedDataParallel):
+            model_state = model_state_to_cpu(model.module.state_dict())
+        else:
+            model_state = model.state_dict()
+    else:
+        model_state = None
+
+    try:
+        import pcdet
+        version = 'pcdet+' + pcdet.__version__
+    except ImportError:
+        version = 'none'
+
+    return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
+
+
+def save_checkpoint(state, filename='checkpoint'):
+    # Disabled branch: the optimizer state is kept inside the main checkpoint file
+    # instead of being split out into a separate '{}_optim.pth' file.
+    if False and 'optimizer_state' in state:
+        optimizer_state = state['optimizer_state']
+        state.pop('optimizer_state', None)
+        optimizer_filename = '{}_optim.pth'.format(filename)
+        if torch.__version__ >= '1.4':
+            torch.save({'optimizer_state': optimizer_state}, optimizer_filename, _use_new_zipfile_serialization=False)
+        else:
+            torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
+
+    filename = '{}.pth'.format(filename)
+    if torch.__version__ >= '1.4':
+        torch.save(state, filename, _use_new_zipfile_serialization=False)
+    else:
+        torch.save(state, filename)
+
+
+def disable_augmentation_hook(hook_config, dataloader, total_epochs, cur_epoch, cfg, flag, logger):
+    """
+    This hook turns off the data augmentation during training.
+    """
+    if hook_config is not None:
+        DisableAugmentationHook = hook_config.get('DisableAugmentationHook', None)
+        if DisableAugmentationHook is not None:
+            num_last_epochs = DisableAugmentationHook.NUM_LAST_EPOCHS
+            if (total_epochs - num_last_epochs) <= cur_epoch and not flag:
+                DISABLE_AUG_LIST = DisableAugmentationHook.DISABLE_AUG_LIST
+                dataset_cfg = cfg.DATA_CONFIG
+                logger.info(f'Disable augmentations: {DISABLE_AUG_LIST}')
+                dataset_cfg.DATA_AUGMENTOR.DISABLE_AUG_LIST = DISABLE_AUG_LIST
+                dataloader._dataset.data_augmentor.disable_augmentation(dataset_cfg.DATA_AUGMENTOR)
+                flag = True
+    return flag
\ No newline at end of file
diff --git a/toolbox/openpcdet/tools/visual_utils/open3d_vis_utils.py b/toolbox/openpcdet/tools/visual_utils/open3d_vis_utils.py
new file mode 100644
index 000000000..706d037bd
--- /dev/null
+++ b/toolbox/openpcdet/tools/visual_utils/open3d_vis_utils.py
@@ -0,0 +1,116 @@
+"""
+Open3d visualization tool box
+Written by Jihan YANG
+All rights reserved from 2021 - present.
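+
+Typical use (a sketch; torch tensors and numpy arrays are both accepted):
+    draw_scenes(points, gt_boxes=gt_boxes, ref_boxes=pred_boxes, ref_scores=scores, ref_labels=labels)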
+""" +import open3d +import torch +import matplotlib +import numpy as np + +box_colormap = [ + [1, 1, 1], + [0, 1, 0], + [0, 1, 1], + [1, 1, 0], +] + + +def get_coor_colors(obj_labels): + """ + Args: + obj_labels: 1 is ground, labels > 1 indicates different instance cluster + + Returns: + rgb: [N, 3]. color for each point. + """ + colors = matplotlib.colors.XKCD_COLORS.values() + max_color_num = obj_labels.max() + + color_list = list(colors)[:max_color_num+1] + colors_rgba = [matplotlib.colors.to_rgba_array(color) for color in color_list] + label_rgba = np.array(colors_rgba)[obj_labels] + label_rgba = label_rgba.squeeze()[:, :3] + + return label_rgba + + +def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_labels=None, ref_scores=None, point_colors=None, draw_origin=True): + if isinstance(points, torch.Tensor): + points = points.cpu().numpy() + if isinstance(gt_boxes, torch.Tensor): + gt_boxes = gt_boxes.cpu().numpy() + if isinstance(ref_boxes, torch.Tensor): + ref_boxes = ref_boxes.cpu().numpy() + + vis = open3d.visualization.Visualizer() + vis.create_window() + + vis.get_render_option().point_size = 1.0 + vis.get_render_option().background_color = np.zeros(3) + + # draw origin + if draw_origin: + axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0]) + vis.add_geometry(axis_pcd) + + pts = open3d.geometry.PointCloud() + pts.points = open3d.utility.Vector3dVector(points[:, :3]) + + vis.add_geometry(pts) + if point_colors is None: + pts.colors = open3d.utility.Vector3dVector(np.ones((points.shape[0], 3))) + else: + pts.colors = open3d.utility.Vector3dVector(point_colors) + + if gt_boxes is not None: + vis = draw_box(vis, gt_boxes, (0, 0, 1)) + + if ref_boxes is not None: + vis = draw_box(vis, ref_boxes, (0, 1, 0), ref_labels, ref_scores) + + vis.run() + vis.destroy_window() + + +def translate_boxes_to_open3d_instance(gt_boxes): + """ + 4-------- 6 + /| /| + 5 -------- 3 . + | | | | + . 
7 -------- 1 + |/ |/ + 2 -------- 0 + """ + center = gt_boxes[0:3] + lwh = gt_boxes[3:6] + axis_angles = np.array([0, 0, gt_boxes[6] + 1e-10]) + rot = open3d.geometry.get_rotation_matrix_from_axis_angle(axis_angles) + box3d = open3d.geometry.OrientedBoundingBox(center, rot, lwh) + + line_set = open3d.geometry.LineSet.create_from_oriented_bounding_box(box3d) + + # import ipdb; ipdb.set_trace(context=20) + lines = np.asarray(line_set.lines) + lines = np.concatenate([lines, np.array([[1, 4], [7, 6]])], axis=0) + + line_set.lines = open3d.utility.Vector2iVector(lines) + + return line_set, box3d + + +def draw_box(vis, gt_boxes, color=(0, 1, 0), ref_labels=None, score=None): + for i in range(gt_boxes.shape[0]): + line_set, box3d = translate_boxes_to_open3d_instance(gt_boxes[i]) + if ref_labels is None: + line_set.paint_uniform_color(color) + else: + line_set.paint_uniform_color(box_colormap[ref_labels[i]]) + + vis.add_geometry(line_set) + + # if score is not None: + # corners = box3d.get_box_points() + # vis.add_3d_label(corners[5], '%.2f' % score[i]) + return vis diff --git a/toolbox/openpcdet/tools/visual_utils/visualize_utils.py b/toolbox/openpcdet/tools/visual_utils/visualize_utils.py new file mode 100644 index 000000000..41a142d02 --- /dev/null +++ b/toolbox/openpcdet/tools/visual_utils/visualize_utils.py @@ -0,0 +1,215 @@ +import mayavi.mlab as mlab +import numpy as np +import torch + +box_colormap = [ + [1, 1, 1], + [0, 1, 0], + [0, 1, 1], + [1, 1, 0], +] + + +def check_numpy_to_torch(x): + if isinstance(x, np.ndarray): + return torch.from_numpy(x).float(), True + return x, False + + +def rotate_points_along_z(points, angle): + """ + Args: + points: (B, N, 3 + C) + angle: (B), angle along z-axis, angle increases x ==> y + Returns: + + """ + points, is_numpy = check_numpy_to_torch(points) + angle, _ = check_numpy_to_torch(angle) + + cosa = torch.cos(angle) + sina = torch.sin(angle) + zeros = angle.new_zeros(points.shape[0]) + ones = angle.new_ones(points.shape[0]) + rot_matrix = torch.stack(( + cosa, sina, zeros, + -sina, cosa, zeros, + zeros, zeros, ones + ), dim=1).view(-1, 3, 3).float() + points_rot = torch.matmul(points[:, :, 0:3], rot_matrix) + points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1) + return points_rot.numpy() if is_numpy else points_rot + + +def boxes_to_corners_3d(boxes3d): + """ + 7 -------- 4 + /| /| + 6 -------- 5 . + | | | | + . 
3 -------- 0 + |/ |/ + 2 -------- 1 + Args: + boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center + + Returns: + """ + boxes3d, is_numpy = check_numpy_to_torch(boxes3d) + + template = boxes3d.new_tensor(( + [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1], + [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1], + )) / 2 + + corners3d = boxes3d[:, None, 3:6].repeat(1, 8, 1) * template[None, :, :] + corners3d = rotate_points_along_z(corners3d.view(-1, 8, 3), boxes3d[:, 6]).view(-1, 8, 3) + corners3d += boxes3d[:, None, 0:3] + + return corners3d.numpy() if is_numpy else corners3d + + +def visualize_pts(pts, fig=None, bgcolor=(0, 0, 0), fgcolor=(1.0, 1.0, 1.0), + show_intensity=False, size=(600, 600), draw_origin=True): + if not isinstance(pts, np.ndarray): + pts = pts.cpu().numpy() + if fig is None: + fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=fgcolor, engine=None, size=size) + + if show_intensity: + G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 3], mode='point', + colormap='gnuplot', scale_factor=1, figure=fig) + else: + G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='point', + colormap='gnuplot', scale_factor=1, figure=fig) + if draw_origin: + mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2) + mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), tube_radius=0.1) + mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), tube_radius=0.1) + mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), tube_radius=0.1) + + return fig + + +def draw_sphere_pts(pts, color=(0, 1, 0), fig=None, bgcolor=(0, 0, 0), scale_factor=0.2): + if not isinstance(pts, np.ndarray): + pts = pts.cpu().numpy() + + if fig is None: + fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(600, 600)) + + if isinstance(color, np.ndarray) and color.shape[0] == 1: + color = color[0] + color = (color[0] / 255.0, color[1] / 255.0, color[2] / 255.0) + + if isinstance(color, np.ndarray): + pts_color = np.zeros((pts.__len__(), 4), dtype=np.uint8) + pts_color[:, 0:3] = color + pts_color[:, 3] = 255 + G = mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], np.arange(0, pts_color.__len__()), mode='sphere', + scale_factor=scale_factor, figure=fig) + G.glyph.color_mode = 'color_by_scalar' + G.glyph.scale_mode = 'scale_by_vector' + G.module_manager.scalar_lut_manager.lut.table = pts_color + else: + mlab.points3d(pts[:, 0], pts[:, 1], pts[:, 2], mode='sphere', color=color, + colormap='gnuplot', scale_factor=scale_factor, figure=fig) + + mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='cube', scale_factor=0.2) + mlab.plot3d([0, 3], [0, 0], [0, 0], color=(0, 0, 1), line_width=3, tube_radius=None, figure=fig) + mlab.plot3d([0, 0], [0, 3], [0, 0], color=(0, 1, 0), line_width=3, tube_radius=None, figure=fig) + mlab.plot3d([0, 0], [0, 0], [0, 3], color=(1, 0, 0), line_width=3, tube_radius=None, figure=fig) + + return fig + + +def draw_grid(x1, y1, x2, y2, fig, tube_radius=None, color=(0.5, 0.5, 0.5)): + mlab.plot3d([x1, x1], [y1, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig) + mlab.plot3d([x2, x2], [y1, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig) + mlab.plot3d([x1, x2], [y1, y1], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig) + mlab.plot3d([x1, x2], [y2, y2], [0, 0], color=color, tube_radius=tube_radius, line_width=1, figure=fig) + return fig + + +def draw_multi_grid_range(fig, grid_size=20, bv_range=(-60, -60, 60, 60)): + for x in range(bv_range[0], bv_range[2], 
grid_size): + for y in range(bv_range[1], bv_range[3], grid_size): + fig = draw_grid(x, y, x + grid_size, y + grid_size, fig) + + return fig + + +def draw_scenes(points, gt_boxes=None, ref_boxes=None, ref_scores=None, ref_labels=None): + if not isinstance(points, np.ndarray): + points = points.cpu().numpy() + if ref_boxes is not None and not isinstance(ref_boxes, np.ndarray): + ref_boxes = ref_boxes.cpu().numpy() + if gt_boxes is not None and not isinstance(gt_boxes, np.ndarray): + gt_boxes = gt_boxes.cpu().numpy() + if ref_scores is not None and not isinstance(ref_scores, np.ndarray): + ref_scores = ref_scores.cpu().numpy() + if ref_labels is not None and not isinstance(ref_labels, np.ndarray): + ref_labels = ref_labels.cpu().numpy() + + fig = visualize_pts(points) + fig = draw_multi_grid_range(fig, bv_range=(0, -40, 80, 40)) + if gt_boxes is not None: + corners3d = boxes_to_corners_3d(gt_boxes) + fig = draw_corners3d(corners3d, fig=fig, color=(0, 0, 1), max_num=100) + + if ref_boxes is not None and len(ref_boxes) > 0: + ref_corners3d = boxes_to_corners_3d(ref_boxes) + if ref_labels is None: + fig = draw_corners3d(ref_corners3d, fig=fig, color=(0, 1, 0), cls=ref_scores, max_num=100) + else: + for k in range(ref_labels.min(), ref_labels.max() + 1): + cur_color = tuple(box_colormap[k % len(box_colormap)]) + mask = (ref_labels == k) + fig = draw_corners3d(ref_corners3d[mask], fig=fig, color=cur_color, cls=ref_scores[mask], max_num=100) + mlab.view(azimuth=-179, elevation=54.0, distance=104.0, roll=90.0) + return fig + + +def draw_corners3d(corners3d, fig, color=(1, 1, 1), line_width=2, cls=None, tag='', max_num=500, tube_radius=None): + """ + :param corners3d: (N, 8, 3) + :param fig: + :param color: + :param line_width: + :param cls: + :param tag: + :param max_num: + :return: + """ + import mayavi.mlab as mlab + num = min(max_num, len(corners3d)) + for n in range(num): + b = corners3d[n] # (8, 3) + + if cls is not None: + if isinstance(cls, np.ndarray): + mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%.2f' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig) + else: + mlab.text3d(b[6, 0], b[6, 1], b[6, 2], '%s' % cls[n], scale=(0.3, 0.3, 0.3), color=color, figure=fig) + + for k in range(0, 4): + i, j = k, (k + 1) % 4 + mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius, + line_width=line_width, figure=fig) + + i, j = k + 4, (k + 1) % 4 + 4 + mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius, + line_width=line_width, figure=fig) + + i, j = k, k + 4 + mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius, + line_width=line_width, figure=fig) + + i, j = 0, 5 + mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius, + line_width=line_width, figure=fig) + i, j = 1, 4 + mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]], [b[i, 2], b[j, 2]], color=color, tube_radius=tube_radius, + line_width=line_width, figure=fig) + + return fig diff --git a/toolbox/spconv/.gitignore b/toolbox/spconv/.gitignore new file mode 100644 index 000000000..eedae348c --- /dev/null +++ b/toolbox/spconv/.gitignore @@ -0,0 +1,109 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so +*.o +*.out + +# Distribution / packaging +.Python +build/ +build_pip/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ 
+*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +.vscode diff --git a/toolbox/spconv/.gitmodules b/toolbox/spconv/.gitmodules new file mode 100644 index 000000000..2347f2a3f --- /dev/null +++ b/toolbox/spconv/.gitmodules @@ -0,0 +1,3 @@ +[submodule "third_party/pybind11"] + path = third_party/pybind11 + url = ssh://git@bitbucket.iluvatar.ai:7999/adapt/pybind11.git diff --git a/toolbox/spconv/CMakeLists.txt b/toolbox/spconv/CMakeLists.txt new file mode 100644 index 000000000..216e1682f --- /dev/null +++ b/toolbox/spconv/CMakeLists.txt @@ -0,0 +1,55 @@ +cmake_minimum_required(VERSION 3.13 FATAL_ERROR) +project(SparseConv LANGUAGES CXX CUDA VERSION 1.0) + +option(SPCONV_BuildTests "Build the unit tests when BUILD_TESTING is enabled." ON) +set(CMAKE_CXX_EXTENSIONS OFF) # avoid gnu++11 be added to CXX flags + +if (NOT DEFINED ENV{SW_HOME}) + set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/corex") +else() + set(CUDA_TOOLKIT_ROOT_DIR "$ENV{SW_HOME}/local/corex") +endif() + +MESSAGE( STATUS "this var key = ${CUDA_TOOLKIT_ROOT_DIR}.") +# get_filename_component(CUDA_TOOLKIT_ROOT_DIR "${CUDA_TOOLKIT_ROOT_DIR}" DIRECTORY) +# get_filename_component(CUDA_TOOLKIT_ROOT_DIR "${CUDA_TOOLKIT_ROOT_DIR}" DIRECTORY) +if(WIN32) # true if windows (32 and 64 bit) + set(CUDA_LIB_PATH_HINTS "${CUDA_TOOLKIT_ROOT_DIR}/lib/x64") + add_compile_definitions(TV_WINDOWS) +else() + set(CUDA_LIB_PATH_HINTS "${CUDA_TOOLKIT_ROOT_DIR}/lib64") + MESSAGE( STATUS "this var key = ${CUDA_LIB_PATH_HINTS}.") +endif() + +find_library(CUDA_CUDART NAMES cudart HINTS ${CUDA_LIB_PATH_HINTS}) +find_library(CUDA_CUBLAS NAMES cublas HINTS ${CUDA_LIB_PATH_HINTS}) +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + add_compile_definitions(TV_DEBUG) +endif() + +find_package(Torch REQUIRED) + +# add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) + +add_compile_definitions(SPCONV_CUDA) +add_subdirectory(third_party/pybind11) + +# set(CMAKE_C_COMPILER "/usr/bin/gcc") +# set(CMAKE_CXX_COMPILER "/usr/bin/g++") + +set(ALL_LIBS ${CUDA_CUDART} ${CUDA_CUBLAS} ${TORCH_LIBRARIES}) + +set(ALL_INCLUDE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} + ${PROJECT_SOURCE_DIR}/include) + + +add_subdirectory(src/spconv) +add_subdirectory(src/utils) + +if (SPCONV_BuildTests) + include(CTest) #adds option BUILD_TESTING (default ON) + if(BUILD_TESTING) + enable_testing() + add_subdirectory(test) + endif() +endif() diff --git a/toolbox/spconv/LICENSE b/toolbox/spconv/LICENSE new file mode 100644 index 000000000..081a04a3b --- /dev/null +++ b/toolbox/spconv/LICENSE @@ -0,0 +1,201 @@ + Apache License + 
Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Yan Yan + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/toolbox/spconv/README-ILUVATAR.md b/toolbox/spconv/README-ILUVATAR.md
new file mode 100644
index 000000000..7cb737b51
--- /dev/null
+++ b/toolbox/spconv/README-ILUVATAR.md
@@ -0,0 +1,16 @@
+## git clone
+```shell
+git clone --recurse-submodules ssh://git@bitbucket.iluvatar.ai:7999/swapp/spconv.git
+```
+
+## build
+```shell
+bash build_spconv.sh
+```
+## install
+```shell
+bash install_spconv.sh
+```
+## clean
+```shell
+bash clean_spconv.sh
\ No newline at end of file
diff --git a/toolbox/spconv/README.md b/toolbox/spconv/README.md
new file mode 100644
index 000000000..e8da50a4a
--- /dev/null
+++ b/toolbox/spconv/README.md
@@ -0,0 +1,142 @@
+This is not my own work; the original author is [yan yan](https://github.com/traveller59). This project simply makes it convenient for everyone to download the spconv 1.0 version.
+
+
+# SpConv: PyTorch Spatially Sparse Convolution Library
+
+This is a spatially sparse convolution library like [SparseConvNet](https://github.com/facebookresearch/SparseConvNet), but faster and easier to read. It provides sparse convolution, transposed sparse convolution, submanifold convolution, inverse convolution, and sparse max pooling.
+
+If you need other kinds of spatial layers, such as average pooling, please implement them yourself; I don't have time to do this.
+
+The GPU indice-generation algorithm is an unofficial implementation of the paper [SECOND](http://www.mdpi.com/1424-8220/18/10/3337). That algorithm (excluding the GPU SubM indice-generation algorithm) may be protected by a patent.
+
+This project only supports CUDA 9.0+. If you are using CUDA 8.0, please upgrade to 9.0.
+
+## Install
+
+0. Use ```git clone xxx.git --recursive``` to clone this repo.
+
+1. Install the Boost headers into your system include path: either run ```sudo apt-get install libboost-all-dev``` or download the archive from the official Boost website and copy the headers into your include path.
+
+2. Download CMake >= 3.13.2, then add the cmake executables to PATH.
+
+3. Ensure PyTorch 1.0 is installed in your environment, then run ```python setup.py bdist_wheel``` (don't use ```python setup.py install```).
+
+4. Run ```cd ./dist``` and use pip to install the generated wheel file.
+
+## Compare with SparseConvNet
+
+### Features
+
+* SparseConvNet's sparse convolution doesn't support padding and dilation; spconv does.
+
+* spconv only contains sparse convolutions; batch norm and activation layers can be taken directly from torch.nn, whereas SparseConvNet ships many custom implementations of such layers.
+
+### Speed
+
+* spconv is faster than SparseConvNet thanks to GPU indice generation and a gather-GEMM-scatter algorithm; SparseConvNet uses a hand-written GEMM, which is slow.
+
+## Usage
+
+### SparseConvTensor
+
+```Python
+features = ...       # your features, with shape [N, numPlanes]
+indices = ...        # your indices/coordinates, with shape [N, ndim + 1]; the batch index must be in indices[:, 0]
+spatial_shape = ...  # spatial shape of your sparse tensor.
+batch_size = ...     # batch size of your sparse tensor.
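+
+# A concrete, runnable filling-in of the placeholders above. The shapes and
+# values here are illustrative assumptions added by the editor, not part of
+# the original README:
+import torch
+features = torch.rand(3, 16)           # 3 active voxels with 16 channels each
+indices = torch.tensor([[0, 1, 2, 3],  # each row: [batch_idx, z, y, x]
+                        [0, 4, 5, 6],
+                        [0, 7, 0, 1]], dtype=torch.int32)
+spatial_shape = [8, 8, 8]
+batch_size = 1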
+x = spconv.SparseConvTensor(features, indices, spatial_shape, batch_size)
+x_dense_NCHW = x.dense()  # convert the sparse tensor to a dense NCHW tensor.
+print(x.sparity)  # helper property to check sparsity.
+```
+
+### Sparse Convolution
+
+```Python
+import spconv
+from torch import nn
+class ExampleNet(nn.Module):
+    def __init__(self, shape):
+        super().__init__()
+        self.net = spconv.SparseSequential(
+            spconv.SparseConv3d(32, 64, 3),  # like nn.Conv3d, but groups are unsupported, as is dilation combined with stride > 1
+            nn.BatchNorm1d(64),  # non-spatial layers can be used directly in SparseSequential.
+            nn.ReLU(),
+            spconv.SubMConv3d(64, 64, 3, indice_key="subm0"),
+            nn.BatchNorm1d(64),
+            nn.ReLU(),
+            # when using submanifold convolutions, their indices can be shared to save indice-generation time.
+            spconv.SubMConv3d(64, 64, 3, indice_key="subm0"),
+            nn.BatchNorm1d(64),
+            nn.ReLU(),
+            spconv.SparseConvTranspose3d(64, 64, 3, 2),
+            nn.BatchNorm1d(64),
+            nn.ReLU(),
+            spconv.ToDense(),  # convert the spconv tensor to a dense tensor in NCHW format.
+            nn.Conv3d(64, 64, 3),
+            nn.BatchNorm1d(64),
+            nn.ReLU(),
+        )
+        self.shape = shape
+
+    def forward(self, features, coors, batch_size):
+        coors = coors.int()  # unlike torch, this library only accepts int coordinates.
+        x = spconv.SparseConvTensor(features, coors, self.shape, batch_size)
+        return self.net(x)  # .dense()
+```
+
+### Inverse Convolution
+
+An inverse sparse convolution is the "inverse" of a sparse convolution: its output contains the same indices as the input of the corresponding sparse convolution.
+
+Inverse convolutions are typically used in semantic segmentation.
+
+```Python
+class ExampleNet(nn.Module):
+    def __init__(self, shape):
+        super().__init__()
+        self.net = spconv.SparseSequential(
+            spconv.SparseConv3d(32, 64, 3, 2, indice_key="cp0"),
+            spconv.SparseInverseConv3d(64, 32, 3, indice_key="cp0"),  # the kernel size must be provided to create the weight
+        )
+        self.shape = shape
+
+    def forward(self, features, coors, batch_size):
+        coors = coors.int()
+        x = spconv.SparseConvTensor(features, coors, self.shape, batch_size)
+        return self.net(x)
+```
+
+### Utility functions
+
+* Convert a point cloud to voxels:
+
+```Python
+voxel_generator = spconv.utils.VoxelGenerator(
+    voxel_size=[0.1, 0.1, 0.1],
+    point_cloud_range=[-50, -50, -3, 50, 50, 1],
+    max_num_points=30,
+    max_voxels=40000
+)
+
+points = ...  # [N, 3+] tensor.
+voxels, coords, num_points_per_voxel = voxel_generator.generate(points)
+```
+
+## Implementation Details
+
+This implementation uses a gather-GEMM-scatter framework to perform sparse convolution.
+
+## Projects using spconv
+
+* [second.pytorch](https://github.com/traveller59/second.pytorch): point cloud object detection on the KITTI dataset.
+
+## Authors
+
+* **Yan Yan** - *initial work* - [traveller59](https://github.com/traveller59)
+
+* **Bo Li** - *GPU indice-generation idea; holder of the patent on the sparse conv GPU indice-generation algorithm (excluding SubM)* - [prclibo](https://github.com/prclibo)
+
+## License
+
+This project is licensed under the Apache License 2.0 - see the [LICENSE.md](LICENSE.md) file for details.
diff --git a/toolbox/spconv/build_spconv.sh b/toolbox/spconv/build_spconv.sh
new file mode 100644
index 000000000..9189d4ce7
--- /dev/null
+++ b/toolbox/spconv/build_spconv.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+COREX_VERSION=${COREX_VERSION:-latest}
+MAX_JOBS=${MAX_JOBS:-$(nproc --all)}
+PYTHON_PATH=$(which python3)
+${PYTHON_PATH} -m pip list | grep "^torch .*+corex" || {
+    echo "ERROR: building spconv requires the corex build of torch to be installed."
+    exit 1
+}
+
+export MAX_JOBS=${MAX_JOBS}
+
+# Extract the Python minor version (e.g. "10" for Python 3.10) to locate the CPython headers.
+PY_VERSION=$(python3 -V 2>&1|awk '{print $2}'|awk -F '.' '{print $2}')
+
+if [ "$PY_VERSION" == "6" ] || [ "$PY_VERSION" == "7" ];
+then
+    export CPATH=$CPATH:/usr/local/include/python3.${PY_VERSION}m/
+else
+    export CPATH=$CPATH:/usr/local/include/python3.${PY_VERSION}/
+fi
+
+# Pick the Iluvatar compute architecture from the GPU model reported by ixsmi.
+SIGN=${SIGN:-$(ixsmi -L)}
+if [[ $SIGN =~ MR || $SIGN =~ BI-V150 ]]
+then
+    export CMAKE_CUDA_ARCHITECTURES=ivcore11
+else
+    export CMAKE_CUDA_ARCHITECTURES=ivcore10
+fi
+
+${PYTHON_PATH} setup.py bdist_wheel -d build_pip 2>&1 | tee compile.log || exit
+# Return 0 status if all finished
+exit 0
+
diff --git a/toolbox/spconv/clean_spconv.sh b/toolbox/spconv/clean_spconv.sh
new file mode 100644
index 000000000..f43a0ef49
--- /dev/null
+++ b/toolbox/spconv/clean_spconv.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+rm -rf build
+rm -rf build_pip
+rm -rf spconv.egg-info
+pip3 uninstall spconv -y
+
+# Return 0 status if all finished
+exit 0
diff --git a/toolbox/spconv/include/paramsgrid.h b/toolbox/spconv/include/paramsgrid.h
new file mode 100644
index 000000000..62d4a89c5
--- /dev/null
+++ b/toolbox/spconv/include/paramsgrid.h
@@ -0,0 +1,62 @@
+// Copyright 2019 Yan Yan
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef PARAMS_GRID_H_
+#define PARAMS_GRID_H_
+#include <tuple>
+#include <vector>
+
+namespace detail {
+template <class T> int getTotalSize(std::vector<T> arg) { return arg.size(); }
+
+template <class T, class... TArgs>
+int getTotalSize(std::vector<T> arg, std::vector<TArgs>... args) {
+  return arg.size() * getTotalSize(args...);
+}
+template <class T> int getSize(std::vector<T> arg) { return arg.size(); }
+
+template <int Idx, class TT, class T>
+void assigner(TT &src, std::vector<int> counter, std::vector<T> &arg) {
+  std::get<Idx>(src) = arg[counter[Idx]];
+}
+
+template <int Idx, class TT, class T, class... TArgs>
+void assigner(TT &src, std::vector<int> counter, std::vector<T> &arg,
+              std::vector<TArgs> &... args) {
+  std::get<Idx>(src) = arg[counter[Idx]];
+  assigner<Idx + 1>(src, counter, args...);
+}
+} // namespace detail
+template <class... TArgs>
+std::vector<std::tuple<TArgs...>> paramsGrid(std::vector<TArgs>... args) {
+  int length = detail::getTotalSize(args...);
+  std::vector<int> sizes = {detail::getSize(args)...};
+  int size = sizes.size();
+
+  std::vector<std::tuple<TArgs...>> params(length);
+  std::vector<int> counter(size);
+  for (int i = 0; i < length; ++i) {
+    detail::assigner<0>(params[i], counter, args...);
+    counter[size - 1] += 1;
+    for (int c = size - 1; c >= 0; --c) {
+      if (counter[c] == sizes[c] && c > 0) {
+        counter[c - 1] += 1;
+        counter[c] = 0;
+      }
+    }
+  }
+  return params;
+}
+
+#endif
\ No newline at end of file
diff --git a/toolbox/spconv/include/prettyprint.h b/toolbox/spconv/include/prettyprint.h
new file mode 100644
index 000000000..72cdb9ce9
--- /dev/null
+++ b/toolbox/spconv/include/prettyprint.h
@@ -0,0 +1,445 @@
+// Copyright Louis Delacroix 2010 - 2014.
+// Distributed under the Boost Software License, Version 1.0.
+// (See accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+//
+// A pretty printing library for C++
+//
+// Usage:
+// Include this header, and operator<< will "just work".
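+//
+// Editor's illustrative sketch (an assumption-based example, not part of the
+// original header): with the default delimiters defined below, standard
+// containers print like
+//
+//   std::vector<int> v{1, 2, 3};
+//   std::cout << v << std::endl;                 // prints [1, 2, 3]
+//   std::map<int, char> m{{1, 'a'}, {2, 'b'}};
+//   std::cout << m << std::endl;                 // prints [(1, a), (2, b)]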
+ +#ifndef H_PRETTY_PRINT +#define H_PRETTY_PRINT + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace pretty_print +{ + namespace detail + { + // SFINAE type trait to detect whether T::const_iterator exists. + + struct sfinae_base + { + using yes = char; + using no = yes[2]; + }; + + template + struct has_const_iterator : private sfinae_base + { + private: + template static yes & test(typename C::const_iterator*); + template static no & test(...); + public: + static const bool value = sizeof(test(nullptr)) == sizeof(yes); + using type = T; + }; + + template + struct has_begin_end : private sfinae_base + { + private: + template + static yes & f(typename std::enable_if< + std::is_same(&C::begin)), + typename C::const_iterator(C::*)() const>::value>::type *); + + template static no & f(...); + + template + static yes & g(typename std::enable_if< + std::is_same(&C::end)), + typename C::const_iterator(C::*)() const>::value, void>::type*); + + template static no & g(...); + + public: + static bool const beg_value = sizeof(f(nullptr)) == sizeof(yes); + static bool const end_value = sizeof(g(nullptr)) == sizeof(yes); + }; + + } // namespace detail + + + // Holds the delimiter values for a specific character type + + template + struct delimiters_values + { + using char_type = TChar; + const char_type * prefix; + const char_type * delimiter; + const char_type * postfix; + }; + + + // Defines the delimiter values for a specific container and character type + + template + struct delimiters + { + using type = delimiters_values; + static const type values; + }; + + + // Functor to print containers. You can use this directly if you want + // to specificy a non-default delimiters type. The printing logic can + // be customized by specializing the nested template. 
+ + template , + typename TDelimiters = delimiters> + struct print_container_helper + { + using delimiters_type = TDelimiters; + using ostream_type = std::basic_ostream; + + template + struct printer + { + static void print_body(const U & c, ostream_type & stream) + { + using std::begin; + using std::end; + + auto it = begin(c); + const auto the_end = end(c); + + if (it != the_end) + { + for ( ; ; ) + { + stream << *it; + + if (++it == the_end) break; + + if (delimiters_type::values.delimiter != NULL) + stream << delimiters_type::values.delimiter; + } + } + } + }; + + print_container_helper(const T & container) + : container_(container) + { } + + inline void operator()(ostream_type & stream) const + { + if (delimiters_type::values.prefix != NULL) + stream << delimiters_type::values.prefix; + + printer::print_body(container_, stream); + + if (delimiters_type::values.postfix != NULL) + stream << delimiters_type::values.postfix; + } + + private: + const T & container_; + }; + + // Specialization for pairs + + template + template + struct print_container_helper::printer> + { + using ostream_type = typename print_container_helper::ostream_type; + + static void print_body(const std::pair & c, ostream_type & stream) + { + stream << c.first; + if (print_container_helper::delimiters_type::values.delimiter != NULL) + stream << print_container_helper::delimiters_type::values.delimiter; + stream << c.second; + } + }; + + // Specialization for tuples + + template + template + struct print_container_helper::printer> + { + using ostream_type = typename print_container_helper::ostream_type; + using element_type = std::tuple; + + template struct Int { }; + + static void print_body(const element_type & c, ostream_type & stream) + { + tuple_print(c, stream, Int<0>()); + } + + static void tuple_print(const element_type &, ostream_type &, Int) + { + } + + static void tuple_print(const element_type & c, ostream_type & stream, + typename std::conditional, std::nullptr_t>::type) + { + stream << std::get<0>(c); + tuple_print(c, stream, Int<1>()); + } + + template + static void tuple_print(const element_type & c, ostream_type & stream, Int) + { + if (print_container_helper::delimiters_type::values.delimiter != NULL) + stream << print_container_helper::delimiters_type::values.delimiter; + + stream << std::get(c); + + tuple_print(c, stream, Int()); + } + }; + + // Prints a print_container_helper to the specified stream. 
+ + template + inline std::basic_ostream & operator<<( + std::basic_ostream & stream, + const print_container_helper & helper) + { + helper(stream); + return stream; + } + + + // Basic is_container template; specialize to derive from std::true_type for all desired container types + + template + struct is_container : public std::integral_constant::value && + detail::has_begin_end::beg_value && + detail::has_begin_end::end_value> { }; + + template + struct is_container : std::true_type { }; + + template + struct is_container : std::false_type { }; + + template + struct is_container> : std::true_type { }; + + template + struct is_container> : std::true_type { }; + + template + struct is_container> : std::true_type { }; + + + // Default delimiters + + template struct delimiters { static const delimiters_values values; }; + template const delimiters_values delimiters::values = { "[", ", ", "]" }; + template struct delimiters { static const delimiters_values values; }; + template const delimiters_values delimiters::values = { L"[", L", ", L"]" }; + + + // Delimiters for (multi)set and unordered_(multi)set + + template + struct delimiters< ::std::set, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::set, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::set, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::set, wchar_t>::values = { L"{", L", ", L"}" }; + + template + struct delimiters< ::std::multiset, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::multiset, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::multiset, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::multiset, wchar_t>::values = { L"{", L", ", L"}" }; + + template + struct delimiters< ::std::unordered_set, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_set, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::unordered_set, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_set, wchar_t>::values = { L"{", L", ", L"}" }; + + template + struct delimiters< ::std::unordered_multiset, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_multiset, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::unordered_multiset, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_multiset, wchar_t>::values = { L"{", L", ", L"}" }; + + + // Delimiters for pair and tuple + + template struct delimiters, char> { static const delimiters_values values; }; + template const delimiters_values delimiters, char>::values = { "(", ", ", ")" }; + template struct delimiters< ::std::pair, wchar_t> { static const delimiters_values values; }; + template const delimiters_values delimiters< ::std::pair, wchar_t>::values = { L"(", L", ", L")" }; + + template struct delimiters, char> { static const delimiters_values values; }; + template const delimiters_values delimiters, char>::values = { "(", ", ", ")" }; + template struct delimiters< ::std::tuple, wchar_t> { static const delimiters_values values; }; + template const delimiters_values delimiters< 
::std::tuple, wchar_t>::values = { L"(", L", ", L")" }; + + + // Type-erasing helper class for easy use of custom delimiters. + // Requires TCharTraits = std::char_traits and TChar = char or wchar_t, and MyDelims needs to be defined for TChar. + // Usage: "cout << pretty_print::custom_delims(x)". + + struct custom_delims_base + { + virtual ~custom_delims_base() { } + virtual std::ostream & stream(::std::ostream &) = 0; + virtual std::wostream & stream(::std::wostream &) = 0; + }; + + template + struct custom_delims_wrapper : custom_delims_base + { + custom_delims_wrapper(const T & t_) : t(t_) { } + + std::ostream & stream(std::ostream & s) + { + return s << print_container_helper, Delims>(t); + } + + std::wostream & stream(std::wostream & s) + { + return s << print_container_helper, Delims>(t); + } + + private: + const T & t; + }; + + template + struct custom_delims + { + template + custom_delims(const Container & c) : base(new custom_delims_wrapper(c)) { } + + std::unique_ptr base; + }; + + template + inline std::basic_ostream & operator<<(std::basic_ostream & s, const custom_delims & p) + { + return p.base->stream(s); + } + + + // A wrapper for a C-style array given as pointer-plus-size. + // Usage: std::cout << pretty_print_array(arr, n) << std::endl; + + template + struct array_wrapper_n + { + typedef const T * const_iterator; + typedef T value_type; + + array_wrapper_n(const T * const a, size_t n) : _array(a), _n(n) { } + inline const_iterator begin() const { return _array; } + inline const_iterator end() const { return _array + _n; } + + private: + const T * const _array; + size_t _n; + }; + + + // A wrapper for hash-table based containers that offer local iterators to each bucket. + // Usage: std::cout << bucket_print(m, 4) << std::endl; (Prints bucket 5 of container m.) + + template + struct bucket_print_wrapper + { + typedef typename T::const_local_iterator const_iterator; + typedef typename T::size_type size_type; + + const_iterator begin() const + { + return m_map.cbegin(n); + } + + const_iterator end() const + { + return m_map.cend(n); + } + + bucket_print_wrapper(const T & m, size_type bucket) : m_map(m), n(bucket) { } + + private: + const T & m_map; + const size_type n; + }; + +} // namespace pretty_print + + +// Global accessor functions for the convenience wrappers + +template +inline pretty_print::array_wrapper_n pretty_print_array(const T * const a, size_t n) +{ + return pretty_print::array_wrapper_n(a, n); +} + +template pretty_print::bucket_print_wrapper +bucket_print(const T & m, typename T::size_type n) +{ + return pretty_print::bucket_print_wrapper(m, n); +} + + +// Main magic entry point: An overload snuck into namespace std. +// Can we do better? + +namespace std +{ + // Prints a container to the stream using default delimiters + + template + inline typename enable_if< ::pretty_print::is_container::value, + basic_ostream &>::type + operator<<(basic_ostream & stream, const T & container) + { + return stream << ::pretty_print::print_container_helper(container); + } +} + + + +#endif // H_PRETTY_PRINT diff --git a/toolbox/spconv/include/pybind11_utils.h b/toolbox/spconv/include/pybind11_utils.h new file mode 100644 index 000000000..dfda11608 --- /dev/null +++ b/toolbox/spconv/include/pybind11_utils.h @@ -0,0 +1,61 @@ +// Copyright 2019 Yan Yan +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include // everything needed for embedding +#include +#include +#include +#include + +#include + +namespace py = pybind11; + +template +std::vector array2Vector(TPyObject arr){ + py::array arr_np = arr; + size_t size = arr.attr("size").template cast(); + py::array_t arr_cc = arr_np; + std::vector data(arr_cc.data(), arr_cc.data() + size); + return data; +} + +template +std::vector arrayT2Vector(py::array_t arr) +{ + std::vector data(arr.data(), arr.data() + arr.size()); + return data; +} + +template +tv::TensorView array2TensorView(TPyObject arr){ + py::array arr_np = arr; + py::array_t arr_cc = arr_np; + tv::Shape shape; + for (int i = 0; i < arr_cc.ndim(); ++i){ + shape.push_back(arr_cc.shape(i)); + } + return tv::TensorView(arr_cc.mutable_data(), shape); +} +template +tv::TensorView arrayT2TensorView(py::array_t arr){ + tv::Shape shape; + for (int i = 0; i < arr.ndim(); ++i){ + shape.push_back(arr.shape(i)); + } + return tv::TensorView(arr.mutable_data(), shape); +} \ No newline at end of file diff --git a/toolbox/spconv/include/spconv/box_iou.h b/toolbox/spconv/include/spconv/box_iou.h new file mode 100644 index 000000000..2d190f012 --- /dev/null +++ b/toolbox/spconv/include/spconv/box_iou.h @@ -0,0 +1,103 @@ +// Copyright 2019 Yan Yan +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +#ifndef BOX_IOU_H +#define BOX_IOU_H + +#include +// must include pybind11/eigen.h if using eigen matrix as arguments. +#include +#include +#include + +#include + +namespace spconv { +// #include "voxelnet/core/cc/pybind11_helper.h" +namespace py = pybind11; +using namespace pybind11::literals; +template +inline py::array_t constant(ShapeContainer shape, DType value) { + // create ROWMAJOR array. 
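+  // (py::array_t allocates a C-contiguous, row-major buffer by default; the
+  //  std::fill below then initializes every element to `value`.)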
+ py::array_t array(shape); + std::fill(array.mutable_data(), array.mutable_data() + array.size(), value); + return array; +} + +template +inline py::array_t zeros(std::vector shape) { + return constant>(shape, 0); +} + +template +py::array_t +rbbox_iou(py::array_t box_corners, py::array_t qbox_corners, + py::array_t standup_iou, DType standup_thresh) { + namespace bg = boost::geometry; + typedef bg::model::point point_t; + typedef bg::model::polygon polygon_t; + polygon_t poly, qpoly; + std::vector poly_inter, poly_union; + DType inter_area, union_area; + auto box_corners_r = box_corners.template unchecked<3>(); + auto qbox_corners_r = qbox_corners.template unchecked<3>(); + auto standup_iou_r = standup_iou.template unchecked<2>(); + auto N = box_corners_r.shape(0); + auto K = qbox_corners_r.shape(0); + py::array_t overlaps = zeros({N, K}); + auto overlaps_rw = overlaps.template mutable_unchecked<2>(); + if (N == 0 || K == 0) { + return overlaps; + } + for (int k = 0; k < K; ++k) { + for (int n = 0; n < N; ++n) { + if (standup_iou_r(n, k) <= standup_thresh) + continue; + bg::append(poly, point_t(box_corners_r(n, 0, 0), box_corners_r(n, 0, 1))); + bg::append(poly, point_t(box_corners_r(n, 1, 0), box_corners_r(n, 1, 1))); + bg::append(poly, point_t(box_corners_r(n, 2, 0), box_corners_r(n, 2, 1))); + bg::append(poly, point_t(box_corners_r(n, 3, 0), box_corners_r(n, 3, 1))); + bg::append(poly, point_t(box_corners_r(n, 0, 0), box_corners_r(n, 0, 1))); + bg::append(qpoly, + point_t(qbox_corners_r(k, 0, 0), qbox_corners_r(k, 0, 1))); + bg::append(qpoly, + point_t(qbox_corners_r(k, 1, 0), qbox_corners_r(k, 1, 1))); + bg::append(qpoly, + point_t(qbox_corners_r(k, 2, 0), qbox_corners_r(k, 2, 1))); + bg::append(qpoly, + point_t(qbox_corners_r(k, 3, 0), qbox_corners_r(k, 3, 1))); + bg::append(qpoly, + point_t(qbox_corners_r(k, 0, 0), qbox_corners_r(k, 0, 1))); + + bg::intersection(poly, qpoly, poly_inter); + + if (!poly_inter.empty()) { + inter_area = bg::area(poly_inter.front()); + bg::union_(poly, qpoly, poly_union); + if (!poly_union.empty()) { + union_area = bg::area(poly_union.front()); + overlaps_rw(n, k) = inter_area / union_area; + } + poly_union.clear(); + } + poly.clear(); + qpoly.clear(); + poly_inter.clear(); + } + } + return overlaps; +} +} // namespace spconv +#endif \ No newline at end of file diff --git a/toolbox/spconv/include/spconv/geometry.h b/toolbox/spconv/include/spconv/geometry.h new file mode 100644 index 000000000..b4c382e64 --- /dev/null +++ b/toolbox/spconv/include/spconv/geometry.h @@ -0,0 +1,297 @@ +// Copyright 2019 Yan Yan +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
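+//
+// Editor's note, derived from the bounds computed in getValidOutPos below: for
+// a single input coordinate `in`, the reachable output coordinates `out` of a
+// convolution satisfy, per dimension,
+//   out*stride - padding <= in <= out*stride - padding + (kernelSize-1)*dilation
+// which yields uppers = (in + padding) / stride and
+// lowers = ceil((in + padding - (kernelSize-1)*dilation) / stride).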
+ +#ifndef SPCONV_GEOMETRY_H_ +#define SPCONV_GEOMETRY_H_ + +#include +#include +#include + +namespace spconv { +template +TV_HOST_DEVICE Index getValidOutPos(const Index *input_pos, + const Index *kernelSize, + const Index *stride, const Index *padding, + const Index *dilation, + const Index *outSpatialShape, Index *out) { + Index lowers[NDim]; + Index uppers[NDim]; + Index counter[NDim]; + Index counterSize[NDim]; + Index pointCounter = 0; + Index val; + Index numPoints = 1; + Index m, offset; + bool valid = false; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + lowers[i] = (input_pos[i] - (kernelSize[i] - 1) * dilation[i] - 1 + + stride[i] + padding[i]) / + stride[i]; + uppers[i] = (input_pos[i] + padding[i]) / stride[i]; + } + +#pragma unroll + for (unsigned i = 0; i < NDim; ++i) { + counterSize[i] = ((uppers[i] - lowers[i]) / dilation[i] + 1); + numPoints *= counterSize[i]; + } + +#pragma unroll + for (int i = 0; i < NDim; ++i) { + counter[i] = 0; + } + for (int i = 0; i < numPoints; ++i) { + valid = true; + m = 1; + offset = 0; +#pragma unroll + for (int j = NDim - 1; j >= 0; --j) { + val = uppers[j] - counter[j] * dilation[j]; + out[pointCounter * (NDim + 1) + j] = val; + if (val < 0 || (val > outSpatialShape[j] - 1)) { + valid = false; + // break; + } + offset += m * (input_pos[j] - val * stride[j] + padding[j]) / dilation[j]; + m *= kernelSize[j]; + } + + out[pointCounter * (NDim + 1) + NDim] = offset; + if (valid) + ++pointCounter; + counter[NDim - 1] += 1; +#pragma unroll + for (int c = NDim - 1; c >= 0; --c) { + if (counter[c] == counterSize[c] && c > 0) { + counter[c - 1] += 1; + counter[c] = 0; + } + } + } + return pointCounter; +} + +template +TV_HOST_DEVICE Index getValidOutPosTranspose( + const Index *input_pos, const Index *kernelSize, const Index *stride, + const Index *padding, const Index *dilation, const Index *outSpatialShape, + Index *out) { + Index lowers[NDim]; + Index uppers[NDim]; + Index counter[NDim]; + Index counterSize[NDim]; + Index pointCounter = 0; + Index val; + Index numPoints = 1; + Index m, offset; + bool valid = false; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + lowers[i] = input_pos[i] * stride[i] - padding[i]; + uppers[i] = lowers[i] + (kernelSize[i] - 1) * dilation[i]; + } +#pragma unroll + for (unsigned i = 0; i < NDim; ++i) { + counterSize[i] = ((uppers[i] - lowers[i]) / dilation[i] + 1); + numPoints *= counterSize[i]; + } +#pragma unroll + for (int i = 0; i < NDim; ++i) { + counter[i] = 0; + } + for (int i = 0; i < numPoints; ++i) { + valid = true; + m = 1; + offset = 0; +#pragma unroll + for (int j = NDim - 1; j >= 0; --j) { + val = uppers[j] - counter[j] * dilation[j]; + out[pointCounter * (NDim + 1) + j] = val; + if (val < 0 || (val > outSpatialShape[j] - 1)) { + valid = false; + // break; + } + offset += m * (val - lowers[j]) / dilation[j]; + m *= kernelSize[j]; + } + out[pointCounter * (NDim + 1) + NDim] = offset; + if (valid) + ++pointCounter; + counter[NDim - 1] += 1; +#pragma unroll + for (int c = NDim - 1; c >= 0; --c) { + if (counter[c] == counterSize[c] && c > 0) { + counter[c - 1] += 1; + counter[c] = 0; + } + } + } + return pointCounter; +} + +template +Index getIndicePairsConv(tv::TensorView indicesIn, + tv::TensorView indicesOut, + tv::TensorView gridsOut, + tv::TensorView indicePairs, + tv::TensorView indiceNum, + const Index *kernelSize, const Index *stride, + const Index *padding, const Index *dilation, + const Index *outSpatialShape) { + // indicesOut: num_active * kernelVolume * (NDim + 1) + Index numAct = 0; + 
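+  // Editorial layout note (inferred from the writes below): indicePairs has
+  // shape [kernelVolume, 2, maxActive]; for kernel offset `offset`,
+  // indicePairs(offset, 0, n) holds the n-th contributing input row and
+  // indicePairs(offset, 1, n) the matching output row, while indiceNum[offset]
+  // counts the valid pairs for that offset.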
auto numActIn = indicesIn.dim(0); + Index batchIdx = 0; + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index kernelVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + kernelVolume *= kernelSize[i]; + } + Index numValidPoints = 0; + Index validPoints[kernelVolume * (NDim + 1)]; + Index *pointPtr = nullptr; + for (int j = 0; j < numActIn; ++j) { + batchIdx = indicesIn(j, 0); + numValidPoints = getValidOutPos( + indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, + dilation, outSpatialShape, validPoints); + for (Index i = 0; i < numValidPoints; ++i) { + pointPtr = validPoints + i * (NDim + 1); + auto offset = pointPtr[NDim]; + auto index = tv::rowArrayIdx(pointPtr, outSpatialShape) + + spatialVolume * batchIdx; + if (gridsOut[index] == -1) { + for (unsigned k = 1; k < NDim + 1; ++k) { + indicesOut(numAct, k) = pointPtr[k - 1]; + } + indicesOut(numAct, 0) = batchIdx; + gridsOut[index] = numAct++; + } + // indicePairs: [K, 2, L] + indicePairs(offset, 0, indiceNum[offset]) = j; + indicePairs(offset, 1, indiceNum[offset]++) = gridsOut[index]; + } + } + return numAct; +} + +template +Index getIndicePairsDeConv(tv::TensorView indicesIn, + tv::TensorView indicesOut, + tv::TensorView gridsOut, + tv::TensorView indicePairs, + tv::TensorView indiceNum, + const Index *kernelSize, const Index *stride, + const Index *padding, const Index *dilation, + const Index *outSpatialShape) { + Index numAct = 0; + auto numActIn = indicesIn.dim(0); + Index batchIdx = 0; + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index kernelVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + kernelVolume *= kernelSize[i]; + } + Index numValidPoints = 0; + Index validPoints[kernelVolume * (NDim + 1)]; + Index *pointPtr = nullptr; + for (int j = 0; j < numActIn; ++j) { + batchIdx = indicesIn(j, 0); + numValidPoints = getValidOutPosTranspose( + indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, + dilation, outSpatialShape, validPoints); + for (Index i = 0; i < numValidPoints; ++i) { + pointPtr = validPoints + i * (NDim + 1); + auto offset = pointPtr[NDim]; + auto index = tv::rowArrayIdx(pointPtr, outSpatialShape) + + spatialVolume * batchIdx; + if (gridsOut[index] == -1) { + for (unsigned k = 1; k < NDim + 1; ++k) { + indicesOut(numAct, k) = pointPtr[k - 1]; + } + indicesOut(numAct, 0) = batchIdx; + gridsOut[index] = numAct++; + } + // indicePairs: [K, 2, L] + indicePairs(offset, 0, indiceNum[offset]) = j; + indicePairs(offset, 1, indiceNum[offset]++) = gridsOut[index]; + } + } + return numAct; +} + +template +Index getIndicePairsSubM(tv::TensorView indicesIn, + tv::TensorView gridsOut, + tv::TensorView indicePairs, + tv::TensorView indiceNum, + const Index *const kernelSize, + const Index *const stride, const Index *const padding, + const Index *dilation, const Index *const outSpatialShape) { + Index numAct = 0; + auto numActIn = indicesIn.dim(0); + Index batchIdx = 0; + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index kernelVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + kernelVolume *= kernelSize[i]; + } + Index numValidPoints = 0; + Index validPoints[kernelVolume * (NDim + 1)]; + Index *pointPtr = nullptr; + Index index = 0; + for (int j = 0; j < numActIn; ++j) { + index = tv::rowArrayIdx(indicesIn.data() + j * (NDim + 1) + 1, + 
outSpatialShape) + + spatialVolume * indicesIn(j, 0); + gridsOut[index] = j; + } + for (int j = 0; j < numActIn; ++j) { + numValidPoints = getValidOutPos( + indicesIn.data() + j * (NDim + 1) + 1, kernelSize, stride, padding, + dilation, outSpatialShape, validPoints); + for (Index i = 0; i < numValidPoints; ++i) { + pointPtr = validPoints + i * (NDim + 1); + auto offset = pointPtr[NDim]; + index = tv::rowArrayIdx(pointPtr, outSpatialShape) + + spatialVolume * indicesIn(j, 0); + if (gridsOut[index] > -1) { + indicePairs(offset, 0, indiceNum[offset]) = j; + indicePairs(offset, 1, indiceNum[offset]++) = gridsOut[index]; + } + } + } + return numActIn; +} + +} // namespace spconv + +#endif \ No newline at end of file diff --git a/toolbox/spconv/include/spconv/indice.cu.h b/toolbox/spconv/include/spconv/indice.cu.h new file mode 100644 index 000000000..276d3390c --- /dev/null +++ b/toolbox/spconv/include/spconv/indice.cu.h @@ -0,0 +1,244 @@ +// Copyright 2019 Yan Yan +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef INDICE_CU_H_ +#define INDICE_CU_H_ +#include +#include +#include + +namespace spconv { +template +__global__ void prepareIndicePairsKernel( + tv::TensorView indicesIn, tv::TensorView indicesOut, + tv::TensorView gridsOut, tv::TensorView indicePairs, + tv::TensorView indiceNum, tv::TensorView indicePairUnique, + const tv::SimpleVector kernelSize, + const tv::SimpleVector stride, + const tv::SimpleVector padding, + const tv::SimpleVector dilation, + const tv::SimpleVector outSpatialShape) { + auto numActIn = indicesIn.dim(0); + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index kernelVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + kernelVolume *= kernelSize[i]; + } + Index numValidPoints = 0; + Index validPoints[KernelMaxVolume * (NDim + 1)]; + Index *pointPtr = nullptr; + auto indicePairsDim2 = indicePairs.dim(2); + Index index; + for (int ix : tv::KernelLoopX(numActIn)) { + numValidPoints = getValidOutPos( + indicesIn.data() + ix * (NDim + 1) + 1, kernelSize.data(), + stride.data(), padding.data(), dilation.data(), outSpatialShape.data(), + validPoints); + for (Index i = 0; i < numValidPoints; ++i) { + pointPtr = validPoints + i * (NDim + 1); + auto offset = pointPtr[NDim]; + auto oldNum = atomicAdd(indiceNum.data() + offset, Index(1)); + indicePairs(offset, 0, oldNum) = ix; + index = tv::rowArrayIdx(pointPtr, outSpatialShape.data()) + + spatialVolume * indicesIn(ix, 0); + indicePairs(offset, 1, oldNum) = index; + indicePairUnique[offset * indicePairsDim2 + oldNum] = index; + } + } +} + +template +__global__ void prepareDeConvIndicePairsKernel( + tv::TensorView indicesIn, tv::TensorView indicesOut, + tv::TensorView gridsOut, tv::TensorView indicePairs, + tv::TensorView indiceNum, tv::TensorView indicePairUnique, + const tv::SimpleVector kernelSize, + const tv::SimpleVector stride, + const tv::SimpleVector padding, + const tv::SimpleVector dilation, + const tv::SimpleVector 
outSpatialShape) { + auto numActIn = indicesIn.dim(0); + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index kernelVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + kernelVolume *= kernelSize[i]; + } + Index numValidPoints = 0; + Index validPoints[KernelMaxVolume * (NDim + 1)]; + Index *pointPtr = nullptr; + auto indicePairsDim2 = indicePairs.dim(2); + Index index; + for (int ix : tv::KernelLoopX(numActIn)) { + numValidPoints = getValidOutPosTranspose( + indicesIn.data() + ix * (NDim + 1) + 1, kernelSize.data(), + stride.data(), padding.data(), dilation.data(), outSpatialShape.data(), + validPoints); + for (Index i = 0; i < numValidPoints; ++i) { + pointPtr = validPoints + i * (NDim + 1); + auto offset = pointPtr[NDim]; + auto oldNum = atomicAdd(indiceNum.data() + offset, Index(1)); + indicePairs(offset, 0, oldNum) = ix; + index = tv::rowArrayIdx(pointPtr, outSpatialShape.data()) + + spatialVolume * indicesIn(ix, 0); + indicePairs(offset, 1, oldNum) = index; + indicePairUnique[offset * indicePairsDim2 + oldNum] = index; + } + } +} + +template +__global__ void assignGridAndIndiceOutKernel( + tv::TensorView indicesOut, tv::TensorView gridsOut, + int numAct, tv::TensorView indicePairs, + tv::TensorView indicePairUnique, + const tv::SimpleVector outSpatialShape, int batchSize) { + + Index index; + auto indicesOutPtr = indicesOut.data(); + for (int ix : tv::KernelLoopX(numAct)) { + index = indicePairUnique[ix]; + gridsOut[index] = ix; + index = tv::rowArrayIdxInv( + index, indicesOutPtr + ix * (NDim + 1) + 1, outSpatialShape.data()); + indicesOut[ix * (NDim + 1)] = index % batchSize; + } +} + +template +__global__ void +assignIndicePairsKernel(tv::TensorView indicesOut, + tv::TensorView gridsOut, int numActIn, + tv::TensorView indicePairs, + tv::TensorView indicePairUnique, + const tv::SimpleVector outSpatialShape) { + + Index index; + int kernelVolume = indicePairs.dim(0); + for (int ix : tv::KernelLoopX(numActIn)) { + for (int i = 0; i < kernelVolume; ++i) { + index = indicePairs(i, 1, ix); + if (index > -1) { + indicePairs(i, 1, ix) = gridsOut[index]; + } + } + } +} + +template +__global__ void +prepareSubMGridKernel(tv::TensorView indicesIn, + tv::TensorView gridsOut, + const tv::SimpleVector outSpatialShape) { + auto numActIn = indicesIn.dim(0); + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index index = 0; + for (int ix : tv::KernelLoopX(numActIn)) { + index = tv::rowArrayIdx(indicesIn.data() + ix * (NDim + 1) + 1, + outSpatialShape.data()) + + spatialVolume * indicesIn(ix, 0); + gridsOut[index] = ix; + } +} + +template +__global__ void getSubMIndicePairsKernel( + tv::TensorView indicesIn, tv::TensorView gridsOut, + tv::TensorView indicePairs, tv::TensorView indiceNum, + const tv::SimpleVector kernelSize, + const tv::SimpleVector stride, + const tv::SimpleVector padding, + const tv::SimpleVector dilation, + const tv::SimpleVector outSpatialShape) { + auto numActIn = indicesIn.dim(0); + Index spatialVolume = 1; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index numValidPoints = 0; + Index validPoints[KernelMaxVolume * (NDim + 1)]; + Index *pointPtr = nullptr; + Index index = 0; + for (int ix : tv::KernelLoopX(numActIn)) { + numValidPoints = getValidOutPos( + indicesIn.data() + ix * (NDim + 1) + 1, kernelSize.data(), + stride.data(), padding.data(), dilation.data(), 
outSpatialShape.data(), + validPoints); + for (int i = 0; i < numValidPoints; ++i) { + pointPtr = validPoints + i * (NDim + 1); + auto offset = pointPtr[NDim]; + index = tv::rowArrayIdx(pointPtr, outSpatialShape.data()) + + spatialVolume * indicesIn(ix, 0); + if (gridsOut[index] > -1) { + auto oldNum = atomicAdd(indiceNum.data() + offset, Index(1)); + indicePairs(offset, 1, oldNum) = gridsOut[index]; + indicePairs(offset, 0, oldNum) = ix; + } + } + } +} + +template +__global__ void resetGridKernel(const Index *indicePairUnique, + tv::TensorView gridsOut, + int numAct) { + for (int ix : tv::KernelLoopX(numAct)) { + gridsOut[indicePairUnique[ix]] = -1; + } +} + +template +__global__ void +resetGridSubMKernel(const Index *indices, tv::TensorView gridsOut, + const tv::SimpleVector outSpatialShape, + int numAct) { + int outSpatialShapeReg[NDim]; + for (int i = 0; i < NDim; ++i) { + outSpatialShapeReg[i] = outSpatialShape[i]; + } + Index spatialVolume = 1; + auto indsPtr = indices; +#pragma unroll + for (int i = 0; i < NDim; ++i) { + spatialVolume *= outSpatialShape[i]; + } + Index index; + for (int ix : tv::KernelLoopX(numAct)) { + indsPtr = indices + ix * (NDim + 1); + index = tv::rowArrayIdx(indsPtr + 1, outSpatialShapeReg); + gridsOut[index + spatialVolume * indsPtr[0]] = -1; + } +} + +} // namespace spconv + +#endif \ No newline at end of file diff --git a/toolbox/spconv/include/spconv/indice.h b/toolbox/spconv/include/spconv/indice.h new file mode 100644 index 000000000..10456783a --- /dev/null +++ b/toolbox/spconv/include/spconv/indice.h @@ -0,0 +1,79 @@ +// Copyright 2019 Yan Yan +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
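+//
+// Editor's note (inferred from the kernels in indice.cu.h): on the GPU,
+// index-pair creation runs in two phases. The *FunctorP1 functors gather
+// candidate pairs plus a list of raw output locations; the *FunctorP2 functors
+// deduplicate those locations, write the final output indices, and remap the
+// pairs onto the compacted output rows.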
+ +#ifndef SPARSE_CONV_INDICE_FUNCTOR_H_ +#define SPARSE_CONV_INDICE_FUNCTOR_H_ +#include + +namespace spconv +{ +namespace functor +{ +template +struct CreateConvIndicePairFunctorP1 +{ + Index operator()( + const Device& d, tv::TensorView indicesIn, + tv::TensorView indicesOut, tv::TensorView gridsOut, + tv::TensorView indicePairs, tv::TensorView indiceNum, + tv::TensorView indicePairUnique, + const tv::SimpleVector kernelSize, + const tv::SimpleVector stride, + const tv::SimpleVector padding, + const tv::SimpleVector dilation, + const tv::SimpleVector outSpatialShape, bool transpose); +}; + +template +struct CreateConvIndicePairFunctorP2 +{ + Index operator()( + const Device& d, tv::TensorView indicesIn, + tv::TensorView indicesOut, tv::TensorView gridsOut, + tv::TensorView indicePairs, tv::TensorView indiceNum, + tv::TensorView indicePairUnique, + const tv::SimpleVector outSpatialShape, bool transpose, + bool resetGrid=false); +}; + +template +struct CreateConvIndicePairFunctor +{ + Index operator()( + const Device& d, tv::TensorView indicesIn, + tv::TensorView indicesOut, tv::TensorView gridsOut, + tv::TensorView indicePairs, tv::TensorView indiceNum, + const tv::SimpleVector kernelSize, + const tv::SimpleVector stride, + const tv::SimpleVector padding, + const tv::SimpleVector dilation, + const tv::SimpleVector outSpatialShape, bool transpose, bool resetGrid=false); +}; + +template +struct CreateSubMIndicePairFunctor +{ + Index operator()( + const Device& d, tv::TensorView indicesIn, tv::TensorView gridsOut, + tv::TensorView indicePairs, tv::TensorView indiceNum, + const tv::SimpleVector kernelSize, + const tv::SimpleVector stride, + const tv::SimpleVector padding, + const tv::SimpleVector dilation, + const tv::SimpleVector outSpatialShape, bool transpose, bool resetGrid=false); +}; +} // namespace functor +} // namespace spconv + +#endif \ No newline at end of file diff --git a/toolbox/spconv/include/spconv/maxpool.h b/toolbox/spconv/include/spconv/maxpool.h new file mode 100644 index 000000000..f743e1346 --- /dev/null +++ b/toolbox/spconv/include/spconv/maxpool.h @@ -0,0 +1,44 @@ +// Copyright 2019 Yan Yan +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
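+//
+// Editor's note (an inference from the functor signatures, not original text):
+// both functors below operate on gathered indice pairs. The forward pass
+// scatters the element-wise maximum of the gathered input rows into the output
+// rows named by `indices`; the backward pass compares inFeatures against
+// outFeatures to locate the max positions and routes `dout` into `din`
+// accordingly.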
+ +#ifndef SPARSE_MAXPOOL_FUNCTOR_H_ +#define SPARSE_MAXPOOL_FUNCTOR_H_ +#include + +namespace spconv +{ +namespace functor +{ +template +struct SparseMaxPoolForwardFunctor +{ + void operator()(const Device& d, tv::TensorView outFeatures, + tv::TensorView inFeatures, + tv::TensorView indices, int size); +}; + +template +struct SparseMaxPoolBackwardFunctor +{ + void operator()(const Device& d, tv::TensorView outFeatures, + tv::TensorView inFeatures, + tv::TensorView dout, + tv::TensorView din, + tv::TensorView indices, int size); +}; + +} // namespace functor +} // namespace spconv + +#endif \ No newline at end of file diff --git a/toolbox/spconv/include/spconv/mp_helper.h b/toolbox/spconv/include/spconv/mp_helper.h new file mode 100644 index 000000000..36681eebe --- /dev/null +++ b/toolbox/spconv/include/spconv/mp_helper.h @@ -0,0 +1,47 @@ +#ifndef MP_HELPER_H_ +#define MP_HELPER_H_ +#include +#include + +namespace spconv { +template struct mp_list {}; + +template +using mp_list_c = mp_list...>; + +namespace detail { + +template +constexpr F mp_for_each_impl(mp_list, F &&f) { + return std::initializer_list{(f(T()), 0)...}, std::forward(f); +} + +template constexpr F mp_for_each_impl(mp_list<>, F &&f) { + return std::forward(f); +} + +} // namespace detail + +namespace detail { + +template class B> struct mp_rename_impl { + // An error "no type named 'type'" here means that the first argument to + // mp_rename is not a list +}; + +template

[git binary patch data omitted]
zifFJ!nX}(K^1O`rO%75ETemG1vGm?bv%iE9DraEiV>V?WY@pCw>?oC|(^ipI<@G#x zOYjpqcri4XJHl|aFt+;U;>MUvCWnhTYx0*D@?%xDv-b&Y+QnXf#sZy6yvp$247|C6 zlNo>ngtpZjV+sx=b8PY*bks`ix9W6R|p(`yvpxRDiBTA@9rcQdN6p}{Kc4l1`O zq*94VHoSU6jQku{4=jI}(uO*|FM!S@DmMBc+u7y*_7IWZvt(`2eTM}a`;2kmt3eyt z)0*iEncg4f$C!XcCs^3v@j54-TeQ~X%6{{I|J+nrrQ8I8&)Fcu?+#93R?AEk{Prjw zTH|m-neZ{|M|Dor?^VTh!fvN28*z)ND#jfZ2MUkMx9^P2Omo2W zT1cAX)Cf$&Wv?&*j>-T=)0R=6VrS|lrRm6FS{fXq(**Y9mGC*eWN`4Ww@1gCM6`e;f8jJx=Fy2G&!4$$2EtCN9UHKesAXRScG2R-hpY59XB8mh>f}7 z0nst3P{r1<0sw5pFvwxslh6ZzA)$Ut8zQ@*x7zRjfQ^H#_({)S<$D}bOCHUBqYdt))iQm5s z3>YEe0~HE@xKM!sY>^kgizcY)GEMHbA>f~m)qOD6o}Chx9dA?6FqmTdAiN6U7VPrj@aA0&_~hKX8HQ0D|Mq%0UQ1$NwamUp~6z zS!voQT+s3^9yDYs$0bLO9EsnJekV9Rp*QtDB>=fsr)3QV1;r_n8f)fQwvYhG^}yxW zbRFf&Mydf=YB}ifY5mFdXMbQbQ5@WRaPRXb~lRMFHyOpf;`OQ=U@o z)Z=EEaUFDx^+|-dy`TJ3Bq9Z@h%9{Ul%$0z1&zgxo3`z%jZTDevKZ!Z_-y*=d@KGV zOS;#IBTKVo-CCcUae-i0ty)hDNSWdku0RjlcU2!vBDOp++c`Q~>rHDP1de z@PCmw5|Hy&J8WSshp99!;g?qn3))L|3IJL47}t=$aE(UNnNm`C1Ck$2-Q>{f14?2O z)$0(rRWclKtm1qt!qGSIH)hpEgRDL4R#ZizurDVlQfzS1W$rx~Lwyw7HD87}*!hYj z>++nbEVAH9UmmQUjM@m^cS<}mr4_7QxUm(LMC6p*>+Gh5%1l)0}jT(6mc@+7DTfEZ?Q@*$vStt{_T9RV*_upUUebt~|6W|dTNBvl(f#|ytHS@k{fXmxAVh5eDHKp)Zh5j-OZ^wld> z@aN}~@fdd@kyHY&r-1bh@w7dZ_pszpG4m2HJQ8a7#A|7J;vs|}*V{ku`fnh z`;&y5F)a8C6)!cc>N^J@nUxN>t00gl$Z5%_JVE%!6Gg`)_$qOJIM_T*O2dL-UIU!9 zN%kHg5M$u7Sbmj05`!K9ZPa0T1Ee`d`^iOk@Y?-e5WcgK`}7IfG%~m;;{!ZtJyACO z_4T#kiQ6JC2`c)l<|TmL^6CLJsHxn5TMGbf_#lsQXx5RC?D0cy(Eh+x5rY1zrbhw1 zqRVc7EZf_2Z|NAl*Yw)FG9$?Al@TeS^V8xL6dY@%Px-^mDJgi&kn0 zFn!-9PYuK`Eod~KXeE50Mkgg1%92&W#*Pebr^=>J!3JnPg1;q4wjn@{0CptuDG~9T zND}(=uI}^Gn`> z7%C_zWCCzgd{Te+^(E7eq(MDDGpT9Twg#x#RIKhGg0ce$wP1zY+J4#2X8t++>r{m; zNx?>+!svOqZkRe^`dkGh8EH`WIV`+PTESY#Cw#^dEEA>V z0ps3Hv+W%KCOWp=0CH$lY#}B-@o=wDXeEfJ_7C=8s56Cwvo)S=m&|<-$oQVeUCnNE zfW?Rie`)WwVk>CNv&)Y+S>%rN8kHpG8GFBXN_}?M*GP@Ts4)lg9ZkVCvizxjQ)}=4 zyE*^6Luh{*G&toVA^{u1@Qji5bR5elEGE2X&)M<(M3lDmj1p&S>-zS_VF}_L4?YW# zG!`BRRxuZLiCV5}_=Ni^qBoUOost!Yo6R-4V5Jpg)4x&I@udPj*r9EcvvBUA3zrMh zWy~?r{6r2^spA`r|L>$g&cf%Qk5;-fMtSP?ilq<+WEubW75+q}uDcWo?xNvU3|Y#> zO`)}RO~UUV`hui9eDm{Pza{BceI|HZrE)%WzyY>Mq{Faw{HH3BPnib_5k=7Zb&<~8 z9ESc83QlG?aajinsw_7`JrQ3^)uiMa=OZS^Y7X{a$A7>3{UE1iv7iMxCiSDlJ5^QU zxvJoc$Zh+7{Vx|;=$?%nCZ=!sYNdUM$f=z(31Pi?bWl*YT^{|L_zCtO`{1rnILIO) zqHMHUa9*Dr^4u6erQV#K^(%Z7SHcE>p~XxciTE+mz_{nJv?}EJ!NHP|iI9?9QDU}7 zf-K_UxFOQ6&{slL5^irm_P)U#hG)rxNDP4sVsw#kvuSE(5Ec=gyYfSza{fR@dr|>> zg3Nt5uI~k651c!ITYWy5xbyAfj{Jv`wj+Y8;Du=Y>Wy>0N09A{J^6^CkAc9#3N!>{ z?oEJOPN{bk+T{l$+=mZ3aE+ah918!{O3|fnFB*HNpYgPX~u%US9caYt3YC; zlCI|P{hkU~F#jz;9PAF0!h$Bk@b)P50AN=rPwOdzepo~%aoJ2@f@dDyo`4=;WqTm6 z=}1`^OQWcvH2OCm>1Gy`2Ynuixgg>Qq7~`46FDO|GWX0W|9vD@?crqL@YfnJIy!rw zUxbE%yzN7 z*nqbUKmS7|pBEAJW3S;p&yTU+3mWH#>cFwdMMkn+!@+X6UCUT=3t;*Zz4>oRRj*be zga`_T#Nyg*UdG2_p<@R;-@E_+tJ|bR<*L+twu;kb$!mcmNKyjaF<@jtJcDSC zDg`}S{D$mvf+*HW^?#T5zk3a$^I=d_R{{~0P9G}C?+pj4(|W__+p^9|9Yjvg6}2EO zJn91Ty2fS(v#v~Ya7zm6Av;$OrdJVnQs(T=Z4e@-6;yc+d4ML@Fsr#RwL<}@`lJgA z1@?cJNd5RM>M&q7rq7*jfxdz;WBmNlPwzoJF!<)Cd#2)}z9QT6RqDouLQ2)*zQ=#i zDU2mCnXMZ+Z+WB*${|0k*B4tCt45=%ISRI$5Y%OnU@uNN#C>JdRWw^h5M5y>t#3`A z{Ay&wWGEd;79gNSUr-uv4)2sk-pXoW&hcAdsQC&!EH}lTY&+bOl7>buVx>ydmWK|Q zb|~s7cPvsI`;|Lg%=!On$w%^)ZYntKYL=H>bPEbgx@fpL#7Ob}nRhHa5_0OqcK3M? 
z+H0S)@NV(gxHjfF+Yl-Anl?Vf-SK_;b7)_dhCkHab2|Y zkvj>Ar*>e+#+TEz(y@fpv~Ffs)I!y}(R&48>%c1|Bw;Q=Z6L!+t}Gd z0$j`KBvjk`@S+8J>J0o6Vz`Ku+`0zHs4s|mRZ4;Ui9w+4OC7TVB-W|Ir}vMx9GsJG zyGH`=Lz8Jg6zp8IHsUYv*cpSeS3}!Jk>&eUBkL~$;7%tu8iiig`0k+XHpMLUC6|M8 z)m?!LDEIXqCr?y5sQl~Wky^Lw2oH*1tFB4 z?5wlgxitBmpdFFJw4wV6HO7Z&#Q>269P!PRw_ z*Zs1V^TFTmwZUUGN-6UdlMiY3 zTd+`0JaKEnq#|jBxgXptH_VW7ek@L&k^D6ayfrhesFqHlIg)P%3V`(G&m7 zv10`moQUoG`>ui!^OEr-7xNg(qrBE&#Vt*d`?`kc+&3gNDGN5V8Mef z=>c$vjp;pQAjYdtHG{z*bJWIp=cY{>gq~uJ`}6S&wraG^M4&>UZILDgmgSw#oW%OT z#;2rYOi~j0$U2hUMJPYOV|lX{4k`CG*<;8Sr8U+XvOyIj!?Q+$xk4iv`JSPglMn&cVOE?BHwyyd^#9RP_`Z~4f6Q3-vat{0Z3%WhQp zeuki<-_L1dLn7`Ri_$t`k%rh-5$hZ_R`}*vva-LfId?8Rn1mQTU~2=F1jn~)`*N$y5FndM~22>IxR#@u2Ipzl_LPcb>M_@8d>2oc9-nf*8qx*&^J`e4$YQ)VGv} z4!^`Fxuvyg@x;fc1@#?LQt~I+wt7shN5sr|d;QOCH~a!7uPyVD0T7uOEA}L7lFEvP zlvD!yHb6`ZqTJ%*;_RFj;qK3s7tg27TthqXY8>yu`e9$LtDP^HS&v70n)# zQ+uYUy$n5C6d?rcsP=E=`=8WRl~i=1hqlC&a6n1y@#V{+lJG3>h(OyZNGU-)Z&|yx ze;hhvxlssyE-5VqTA~QQEo;`u(8B|c5KBqva4-`Gl4lQs(- zNGHIlBde97JpFz@Y|4o@v{nRK$uV_i*#R37G#9a@j{wHeXrTq+H=HeSCb|2BxRE&E z#Z0(JE{#jJfcpf>%(e5+8l@nntmzR`v`3A%rnIH2%=Bo?G7Gsc~>vv?rp2aEsf zzkhE_N=xkdbm$`jW=wm=L|)X)506c7MFR-SvZNHmv5_-!@Ac75Yf_;%w>Ru3F#>!o zzN{;(8vT`bWYo0fq|aV9{ve8ryG+x+?@)^V&620i$0_irN{41zd3`p?>9Ar}i;4|K z5`w`bAq-61m~NV(pzdRw2|?7FrHInO@X|^DzP(;!_Ns@6E#m`WNaUl=_`B$(tnv;G zHHf_ztPa_pbf0HA9Lb|qD#fwc3T8-MIBWMlttPRgfI^{*L2 znPY$AZo3@bI?5|5yu~Q{H{m@!vYR$~qwFg<5ObAea9Z!QVdaXo%#@_rR1n{`&joL4 zv%Q#YT=Hg6k(m~d{4Bi01)78=ll^wlBb1$8)$B3>N=p>y1 zo+2+u%DwJ!)2{X|H!}S%?!+8g=#|P*z(wHe9IR7 z)=mUQ1bK@)@sjj~_!QS@vI}`UGZvnHwbt&#KOKo$i}%brQv-{>Nr5%oiM1l}gDuLS ziuT?a;k5OyJ;3xqAugXQ*YDlfNY04@Zb(T0hg#GlvDOb(o~ZW3l*?%Quj)>#`;ZLX z)~v}Wv5c)55DLQbg#9>0Iov(_4Y`(n?a&CK5H;qMk)30OALKj4E+B8;kI`{Eec0o& zok-KX*{M$DGr_dN^L?Ic?J|;ai`#mOs(;XZ6&_J0tpik2m{Lv=!2ssC$#u$qX=PsT z)4{v{)w)_t6@$DExfIuHsAWtf$Gg5%5&N5XbG?cckOEN9s(uMT4e1J@zKBLn2Y%vd z&9e*VCLK>I6B@~ku|r38lY3qcmBd}&oz|AgHF;-<(dq1zMyo#e>?KiHkF62(O8&${ z@SaCBcbhr&WIzEB#`=9HTu_g1;?eXxV^k1B!BT_5g#>Zjej7M%cjC>L3|4SAAtqo5 z#|Ihp)Jmq`TF;e;(YO$rei0MvMkyL?6)5I`y{A0OL)O~$&KsWX%le%}&mZyUL_%zR zGCuKp&y_r`VMF%V$lRgu^qHGri@+%ksNZ${pm3e2h6o6IxME79kpXTaww|7dX01)% z2|Q`RlZ`7NZx+?sk22woeTVEE3E>M`^OCjS;WpRV9u=8^&D4R|Q(&oHxv z%JSB3cHs2%CoskwGPdu@d3lo@9$qR97Nab$S6u3p@b>tl&}OCpfI}#FslTe(EU-Iw z!<-YbA*ia}FwT5ZuW?>;1(8JPbRKmC5>n+~YoIDNkvr=mDCofi_%#$kKLBOV*dz~n zasF|>$$)uYBh_t@lrlHBA;$=h)MK52-+3=0hkp9ZLy&l}J#`fB2ud`-b#=(Z;~plQ-7|R@6F{C0ruzl zxM*9(CB>cVF{KDAo;aw*FIBXK^kouCgA#fpKAD2YcQS4pP$|Le;?eW!z5T|x>n?8h z_^@6&t*W9jqCO>Tc-q33N1fSo_yvoURP-vd)$ME^4-gG#YXA7ON2O+YE7ahYHDn); zvN23IR1mCS1mNc6R4|hj&zDM3B3W4e0)z{uxNN%CuJ?6aOXG`* zDHt(w*#8}qK`iPeYr>d}qgN_!V0EY&7lzOR4PuD+yG z*7?+t`B8nN#F;$k_cN*J$o@@Z-=uxgto=+P%=NoBqsBG6k7x`ZMmxx{{VC`uf^lL$ z_R)1`L1(bl6KX{;BC}AWHE>)=(ptYkD!fCR2N7Qdp@R#Ml;y*w*|Pt#L?QLl15W*0 zU`7`M?_eoApeCKgngL5T&%wfu?y02jWG=3h>Q{SskO74Z~=KWoaD3pZ52-G{UP(_!OCMRYkj=V-Vx|ckJ1pobIz$8vohBzlRCj z`@8S)Do~8i$DAyjF4pnjw{E7dJ-x~{Q=vxs<+KY(5Rt{`5hV3@=gnTE%?|yN-O#w6 z;Q|~HCBW7tf7H;$T)p{!jf~G#S!(1>&`YR{hjq$p?SG75F zEyq-X`$hQOz{uclWA3TP5?wC4Dbo5O+0M453?ZMO>UGLCx4U;umo21ZdLrfjmhjpi z{4)f-UPvlEBPOo`#f$M)@BwjUSQZWe9itnHd7| z0ETDD(31%Bq>2iN*ATK_@B<9* zV5u>X3jS)tnXeI=i7`DWBfs@~c?lbLv*yc|Rkr zY{Fb6#3w~O6W0Ne5O6D?UE9$LH(s>f@W|aj$mn`?WcK zeI41JJ`ys+c;mQi=%MYz+)fQl@&{6Z2tdB9=H%o}{wAx?dYf@Pb+wXRRQW<@7DEhV zE_^%H&urhzqs5Lh$O7;pp%j2k0^|85(d!Z!)Q}7FiV(zYNBNX7#|-J28BEV%uBw3S zfwPV@IBWnw-BAx0hCqE}zSf&D%S#9z)MG%+>zTqdP|*^$@EjSwvgaqsqoIoj{Oaus zt82?gWuR9#db8~Qp!x<1WkaACclmxWjQpIU`nNV~G0+{>1+T!EErXdn+IwkY5&{%JpF#$aA=u-kIeMEe&*LC*4(q9)7qx*9Lsb~Y|6 
z0ko6MY_#RPZO5K5%(*K{cejTUMg4`6J}kGPWmUcaDX4(ReddO|t3w+wf*`*<47uDq z>KWtox{-JG{7F{Q6SO@s_cSA=t%}N$v_10~y2u1D2lSFo9M`LL|7a|C<2x;F&Li%i z9K}@i!Ja|Bw7~+zm9=t5L$tH}E2}Oy>)Gb#11@DXz-1#0)-gi5 z72CBGtmECRQNz{U>Gb~O*F0uvS#4Kz=kFh`uD0Vz>`sq4vrd2ahI6&&BZb^!5fGZo)qCPKYbxk*)Cr#o?Y2W47PILY(&JL#2l-1m={BiH1i%^YZWov z_1@QSo3BhqBpch@D%NAOTQKlIYk{GFr9(Y5J2F!N*+>^4V0;$dVMK;#)^4SvG^Yw0 zk&df8Gq^mG;_Do!keG0c?r$E#-&TE@GvMntn zdkcpowb2wFOE`64f2wV5=$puRiMW|hsnm!TyVGH;SBw5TaJKrF1+82*OQ_;J-ItHb{?roPo{V+%a}q?X!V}4SkLa7ol@g z_zI>-&oA3#-I>@cv0^{?+#cZ~-zGlm!hh8gGoOBSIvA>VHoS?J+UmdxxGB*C#)d1G zBi6S*?9X5^NUvUl5V%c8#L#@D5eXw+>Hcd?;fEaeR>AA*M5d+6Z#Shp_E%x>Y^e$$ z;WYIYj!sJ>98FG2P*C$A6hCT=%_pj>_L}-)+v<%FQk{U=8%q=j4zYNxe&aI~t8lk1 zkj8BnzBc~Ydf#A!RJ-hJ^a}3AU81k|Y$AEZo}Mp~s(9!6e7_N7Z~E8zlM;Yvaa~F* z;23tZGTgm?@hEc80q=)xl6kVWqNTbdB*eF4(9gFM6+mC)r!m}6uHm4g1NGQ$`A5k1 z6A{Z44*R8~<0xO3M92W2i>s=)y65cG$Y_9HVPWT!JHEgpQ=7)G0A}|droRo}WoMWD zs6==|eOu07C7G;&Qp|eW0&QYqEg9hPuK(Llt+8gdy4pPJMyfyu^Q2rW}E2Bo;42A0{LQee0tvCF2HXHqa>yvqOeO|V&jQud6y8d?;Y(2nedQ&H~=(g}3 z$P~y$M7V7=7J}&y+Yv};lhrO-kC%f)!& z#Y&g{aCb?LjsP35A_UVB|Mf^n=FjBeJ=#J+b`xODzTx(5`Ucv^&tsO$75#EH)?`y?CzlIFIvEj0=3hp~ugI)yj=~h0 zF$i7jM=j_2j!btVhQ*BN+~aTHGhpztn~{m>R=i0mCbEZpuZ?0taM(12w}`BV#9WCo zpuHo;sQ<#0-}DvT(JE_w>bWFfoELHxYHNC}+*D6Z$@e;xu^Xa1iz`vky>4DqvZ45= zjWhOdjvSY`+4KX}p6g68J!Eay>4qMY-}Gb;XG{rbTK?nNo-|M(TU?{yn%S?8kMh|y zM6O4Y*39BgX?tO{d>?aLZTW`VG@h@4N}9@QfE)GaQef^KoH)T6Xh5NBdiYvLJA-Oz%i9tw7ZjJY56k5axv{$4f=j-i}Tuf%DT}P-n-l2xB zIm9pK!%Z+_{tjt**X$6~E6s6VSA!rQ))~L}E|EP}N9r_2&@z&-xSwH7qT1_``0db; zy+VsAhj(bEM4T;thG)+^ue*78pn@NlUYpz-;?eeSAZ|TG$y|fOvv|kG?tlpE-qqQ# zUyL{3VhR_0)5w$teTS#(jpcwS3*H!-;{O7ujx3OQW}YXNAv!h-X*)iYS?koN(X5OZ z-u1d1%r(S}w%$%ooA4}u$fBUj{+e^Ox;9rgVs_jTvT`iW&=Y+nBqt|DE4o+q*SXixLBL4viYLi9uuJ;(2m&!$o;Jz~*Aw9^jQU zGzNI|e57f@U$_;c6!DLP%vHZ zJ75`xwsxYx1ZV%ng9l!qMMU(cti$0i0T%CmK%}DF8ouw*4Hq}J{T+%r76yUr<)b&q zp-+wKY?k|xK8nN;AfSIuP7lllst)ao&L{>3Yp^u5h4y_9XjnKTBS4ein+WKCJa@u$ zmAQ_Wnb{G?$14e*PdhzJ(Jcwo0r?OyZyjcXHH_i(2NW$H+xJsk<*8nbt|BM&Y2_OY zZj-9|3yXI<1DjjMHD0G;AB2pE?OM(SW5W9B3q~@;7cNP_D%`#9UXg*w?@Y8WLqMs= zuJxAVO5_q7671G0cs>lxa9IwX^RsxZD0h0cl9^Li7lK}q3tNvbp%5*!j&q;X0cnZ8ffGJx3?_WS>=j%+cR=jTb>TEdIzpA+3 z9A1S3mPP#646!R+?}pn+sK)h@wyyUh!Dxy-YVjTcxKO3f_AfrlAk2Pv%<@+ z==uJ1%I4{MXuQ?}hGfntDd|c9Q3nVOH4U^QO5<0*b#Sgu--o=se9(r052)4}^&U8( zlf_m%Tsnmsfthvbg*ljdDnL@Gy+C(lBj ztUge2u>>@5GtfgEeiFX58qldllJ>g$)TGCToX_wW8W81kxN9$^$KBHIclHpBeLPH6 z_E9U1(0&OYxjvG7$z81+a7&|H<|F!GvFZXbXcM{Hz6U2%ihB5~5IE}PXN)-uzH z8odd#N&5ALf}B0VweSh;#~C5>rkJ7qZ3!@g52#jlts$Orcy!*-=bOLfpiS}}{_Ho2 zO|h(aR&%AIs~lvsI#8I9G@rIjnQO#%n1SSe)nnE_wk&P2CW@RK30g2$L7jk62p&1R z5#5e{gQY0|Sfre`x)2>Lo{2_vy@0RE=LaJA`Piau0EQ^j3#4jQC2G1Lk#YL&ab93l zpJ}a8A7NzBpnXor8?~*h^=mWBn#k9PN_2MVhFpEg11)C!1c6vskBSB!iN1ftbYv0^ zD1&t&!}2B!o+)1*F2+#N3csu$f{ZRNan07LsMpzP%WF7b;F!<^NlQ@x=lH6lq7)3v z11-qJHh*2gmPDoN-rpLuvXhwN3J|2AKqhl}1J*Rb%l53MznW=NT+vWgFp`fh=tqB% z41oNk!6rOK4384n@#2C|(YH#<12uF$H}WhHFAz7C15JvU#v0Sf(v=q<2)*BenQA|< zH^y514CrDhyNTog{Z`r$(AoJo3pcLrEJly7YRs3fxMp%ptZ1LMNON*>Z`eVrXg)Sj z3{r4EvaoGAMCEtg_XkO;gGfUgYaEb;EU3jl5(a}sosTUp9hCX<>wgzjiP7F-+`d|)lUV#}H{RX-+^o*z~T2Gkbm6XnRm+*k40L1*^ud`sV zUXWXBP;l_nB7s?!yoVGmB;Twq57PYQD>ja@nr3GKl&EhnS|#)S7CMOdxnj5$=f;T?QvdwG0X;Fd&wk5b(+dCV>@Vi29= zl(x(#`~e-#;%A#+8Np~#^s_5|vIZxlPtu9t;cxsz1&N3%M2QAvtCm;U;}<3M+ppVS z3(Wa`aS{oCes2U3QiUIs+3$KfkqE)eM3;10E@BH{n(94*)jxb?4MylQv+bduVXqHZ8nu@x=&?MS|}mF%F3x+52V)Ls`* zZtK-MHd+#Z&IxpS47 z?%V(GB2-AoUfIgtTXtlGN<#Ju*?Zmg-m*s$Z8ADGAr6@#vK?FY-u$m~KhN{K>wcd9 z>!nxgobUIuuIqYV<8!_1i{eYBfLbOhhKW}&x!}C76tHy(1H&>8E@@|QPzYg4Qe$z% 
z9hH_@*S^dVqJSnWn-TifSyz+va|2uk9h(PEL**tnv9}!Gyn9nnhzO|~rU{`v-2V6rV*mi;FO?>~4DV5=)UMM!u6^`skLTAKUx zoSkv;frnQm5!BQRCDl8x*i;8E!lmi>IgKWR6F-9w}VLiWLsO&Y)JHxK;;rmqhopb(_G0 zpaNs~(yZ)x<(HSHs~-CfNt(ivN9z(cBP~3p`$|wI>jbjnygHnvKT;lF>PH(L9nJqN zHB&{-lvvelWlR=eMa<|jd$!NAbb2X!^97<^1i z3s$;QrJgCmM%8Jg+@=v>OT{O7l-j&i|Edy_g#o)OL(zG*p)t;j-y-p#`Mq%)eW9?O zcpIA)E{y{rr~swCai80&?YG#PbIaCxE~@rfex;mZ5*H^_G?B#!C8QuxuzW}cECW!s z#y~(NRkFKm-h!l!8>-@1O#2`l52Q1t+o5X@dF?p$J$Iz-NBIb-l)y;lwGRbaD)aF> zmtR&7O8K~d8|+n6_}4H+D+<+vB_GV6KEmDtcb9xGIc8F#G5 zO3jTth)K?o|EKa>oQT^osFcZ~J)^_(!t|QP!kHf{&`-M+nA7spGyPI@tlT0+C!2m= zWkB>%+D=M2kI%XEI}1r_p*%jXIo>p0CA=1a60}t7BfhHijVih-;YI+xmht|CGWb1j zV)eU9>bsxQSEp9>J^}8XHiPQW`$e71q742tDN)bYUTT0d!E$~CVfDTUiNeE^?Q@R# z!mA3*C}1}6k(3l61&eUhtgt@XQjYLC1i8zKP^Gj-k4!hLS%2YIO^*nRXk6LtNrFCO zg8kF>)nj0KySViLSK7a?$u)w%sX17hH3y4tq+~p-w4~YjBp{rm2_@TcaR~|lu$NM} zfL196V0}{wJ5_pNF1ctZAqlzCLsQ1|f~z=~|CST&$Bz{5y1QdaF<)m(ll|r#O*@T; z2Yfu~TC7y4t5+?Rr!7Yv=WF4HIQL#g8l=e*QnH{~iD0RP8Ve_9wwiu51nty&^L4Kz zX&$F?j(KvM{S4(7nBI!5bm+|oqTlr|69&v242cDxF_aV{RcO2qMY28DeO+zFRLtZ> zAn$E6QWNiH;P)fDP-LuyVWIuoYeEsfSb9`oc3yL)#=k43~bgXOw(Xx;@ zfHsX(!1|!2AN-UI)65hh$0n}@*?P>CQN%O2BC%g^7t(VNF@mb1SWhsoRbvyQtf@Ri^w|a!}vA){L8m1z>_2`uCexvqlb}$o00G zs>$}Eg=3$sTS984jMLTl_+!fayXjL%2;V}?AqFnlVFpYGPRyG$Wa{kuNc!&YT60z& zVma&2%Rv{M>T+8nxX0TzRE`a_gO{%;{_Bw6QxLbR9a-0^tf5Ko!^7+Bu}2#VUmIHA zD9wn4C`_vK-06?U%m~{pJ7U&+VTwWmA!&2E&s@@0my#OcEK|iSUNKA)#0OBR1sf|Y zS~Wt)D2m%h`dmcalofw0f0`SE1Yx$dMy$*h6LNKmxph0^xQI=@C{nS<@it3I%8Frk z4IZ&=d;ZT`$xXtmLp<>^3u+5bza!t!Uw-Vh@ViSIQ<0}uwIsAOk?$GzKks;5% zQo>TXNkij7BMPckjMC2m(%}`wf@%g_p_qtXLi|^Vf{4RmiX=@4E%hd8Ub@SlPl<(! z*MLX0LkH=(^XH^KeZPO405E0k%4pjQ5x!!7SN$qJ=j}!3<$=f~k~Cm5>V>AZOnHaK z1A`cXiiFgZp<;}fiuFbem9zdV2M!T4+qh^@Pq)7V^$`KblC)AS+|It2#(Ff;Ta4IZ z9)XjfUblZK#lVMp3&Q4`@|RnOt5X1lLEZO5aX*^Gu8hw&wWMB7JiksAt{LH%ODwHy z1*>eX%(K~F?7TRh_`N-a*RBgX`Rm?+LNTbPGhG>Ly3;MLG*DM7e$Yq^91+}l!eGo4 zW&2#vBM9yLaaTPKIyTJj5zXM?v{sXYCz3~{g5Z{ITtiAB0S&Vb}h zwfpY(^4ltE1#GH)`)tsLu1tlBvz*d@azC!tPDrfW=1Z~J2?hR)dE&Pc0hWZ#UP6+N zwB~fP$T!ewkrnP*G&pqMTDQgt2kk=?!@$lHQLL`#uNg6S9?$o9Cz|Z~(vP>+l)EATpB8r5|1z_%XOjs_01yo z_7{hSB8m(*B=9?XiqA9OKgc)4i10(bvo_tKbh@Y>QPBJu|BETJqj15*7j3OrA<@ii zQoL8lFGl^u5AN3R<%~_(8g9M1WBK~E*-F0$Zl#$xDqMqV4UQ#5Oky^BjlnMj-16$p ztJqJAgSRY4-rPh7GoHqV0-A8O9SQ>lIqTS$WIJM`TaPt@2oH;3W~Wd17=66UjK&(?L#M+AW&r+=WoCr_L9 zaGUn_^h&hp+DLl=-qQYZpqoyWy(IRPnXQHqyPkIjez;A|XPk|@%cL7BoySyz+}Kx9=1OI1MlG@zz;o8 zX$ki`aK3hSy>P2P$3MSyfc_MiH3oimDg${DDdtia$|Ow)B|Xn=s$7d_xy6?L?oAJ%u+dx&Jm5!B>9rYf+~g#NU)Ls2wd%z4Oc zC5Zf1yV+PAB*=U3?`D{;O|(xPY(16&l}E2>a`8@7ql4kt;~S0qnzIQ!7_vN;w{FCm1m%BEqfB(i zEgk`9y%t0yJC4)OWyk!d7QpgGOxS{D)%Id{JQ*h`P`zAJH1iVLht2CU2+)QS!M-p7(s^&zA=9?(EdByq&9CjISO z5r@2Y>3wS|>`g~>z^ik1)^uyx-}(8Vyqy(1&;~nM?nR{4~;jHrRs4`Y=U! 
z+;2;OQ_4+z@UdikZ-OxuWR$j7rQ#^hys`JBPKh7eu@+b*K*e`=KDbbaRgb8k|0}GI z-;medN2hj$^x>MPc-W3k!r4OzRUM_PIW;Guv^A%^7d$G_dkP^0LW+pVidWb7KRJt7 z{{haciHn4k_~u2I{O_1U$*QYA$KIbJ^)3mnzOpg69?w?%oH(mxN6~5V;kiUe3%ml1 zAzfFc@SsL813Y9Zye5+#D?L9dk9TgRK65uz>Xq(-3;gbMgBl;e3rDDPi&2HXFxVsh zdjrN7R6g3`H(#47Xi6Pt&4<{U+2Uosjqid4rWaHMz01NzXT|IpUAyS&^e@%FbbYnc zLQN82`-t6Z!y0m)4Rbl?!9}k_vtMb(JQ#Gs%Hx-BXkkZq7XwzJdkt>eJ#=ax%32}UWUbL?K5b`4;Zk&lV2l2 zPoI`2J_RDr92u(1pUPNKuw+T1sbFGSK5BK-mD*xct<@g08~CiFNiO@p_kT296{^{8 zl}IgU<6;xxz<`KJj7vL7vC|e71tsOWfZ*Ln_gde@o?%b;?p}w))9ZI{uD4QBM1Th6 zJKC}WQy2n(4)|Jph%UChfzW;G<_`ntsiF_|&GL28^er`#*y97y-L+u=5V^0i)t*Eq zqaE*C>q}cfL2=pN&Lej}WL(5NfBZlm*<1pJAFeAuT1JX$RY2)WBW$%GoNH@~7qcIP z2?0WrXG0|d^C~q#4)d9c59KhW#b{he6~MziD%|fuGQo&!w)t4_!gurabkw}3bw}Hr-6D2 zfEH37i+1Xo_uL5}(mnm&N#xPIzBjot8=AFAx2RT5-NJiV$lMOU-z>JJ zgtb$ga;(-!Z`9Cxh=vQmR;$F4BbvPbMUF}<+b12GI~(heNHaCdi~F9{+KiR?3$&;7 zLF^9Pu2S=6-hmH#T`9DFVURh1KADiHyqF4V8A#^Vo!Z9BQ=kEjj7-f-^D+RnK3hlL z?5Do?Fo*&`mcW~(N;f(0KC>lI@?=rwvG?)yfx?Fop zF0W1&eSYlDUMeB6o6Xv|(zlgxRqhp1tBfOMhnD?0;x*xzNe?9UCZjJR<9j5ZjiCgYjQ_w185cb_W@ea1QlC^Y(6(KVa}MEMmo)mU z+J3u!mtV%D(ZL;l^(Wus*-9Q%TF#U{vu^1znX*T8KU9)6(6 zKU{5t?B4q1%)2nv+l=D85doPt$G)`6Y@SyrT0XfQV=Lag!JJ&3?HxbDDHWRjRvB2( zqH8uf7@(y6(v=wu!4MQZ!TQ#naMjy0FZ)>5XRGbd_IfbO=Mr_*?ky_{FN7`dMnr|Z zwghyIsEWv}tp;gFlg}#KNrwp~fhQ;F;dCjdMkH8j)Kh41Y=_QP%w=|J>8a6I6Tkej z1-jVNgGUA8uCxx}Z8K&o`E>{q#Wm@sRQ?0}B+Y+|_~qCz@Sk#*hdw1nTMtg|9l(?) zUUVdXAa~tl>p1WfEw%8L2j_meOEL$91pMqR)QH_(oyRe-Bwh75DecY0KY=)ttA^*< zS#0R4=>QaF=%vdfX-rscxolnh*DvL+fGmJ%UQzD^!7@(T2mJ=+}RoxwzfP4x;EDI)NC_+L!N&KWTLJmCx_*GdL`HBDK2lB}1 zMfpw|Q`~qmB;?0u=5=Ww)evj>b-jw*vEBXfF;7H+TBRR7H2b$XF&1ea*-G8}44IJl zT)Bc~jT(uRJXAFPbmpMl%)2Fn+=7n*s-SG?sxQ`8>lblN9vYb-s!kgIMv-7j8AzCwHq$nWl+G+@2G zI27T~ZO|EBpr5i@b38=#*mvgf1J2{@h!Rcc-0Sd#iJ8x8lR8suks`pkX>uSmyFh>b zY0ci|1lmDtJk|?%^l9x2lhEkb#d(7WBQnUunH~~ zP=%R^A+bE$D?ndY@a_XWImna6thcf<>uq9l)-Sq(Tl}xsVY&36i$b(W7R1sRp{OTw z`NVmlKX~FT%i~lT4V1Ltd!6Qz;L8sY_UBX=LmF20BtEzHwQO3W2?ei+AF%I@dlEsS zyyoO*JNg&@Gw=%4>yJetTrUhnvU}p&WX-|YIuNpXm%;_&n$Q0oP&kd(DHHUru*ank z3?QBGO&KXXVgP;sm5f7@zJfaWIL(NLCiST_wlg|$@qoN*)I{xf?CQH`q#jG}*447_ z`*L2c%ekWk?@&oQysq!LJ!2`SAkX92vFN>ddJo`4+R+Z~?%Gi2r^K@kuEi3m$BK{D z@X_gUK-P~vPRY~7d>V(u50@jsgP=aW_k-j>a=DVI0coMol z=Wde=Cl#1`AD@fH!;_O!Fw$ZEBp?9oMTF3zt3GrVeuk*zzZGb;<>a+Lr(~m3OQl#p z(*69z17rWg%@Maam$fd4GKgjm#gbr%+OxCw8TYY7vJEHSxuWH{Fo3>>|QS7%%5gJ$OR-L(=J**PQK( zx6g4$O2TeOTz9gxsES%sf9OrjdfJAtMW`AmoeoPt)RYeZfn|uA7KD!8ySoUc7c4k@ zvnLcSaDrLWLvasa{V2$#|43G~86iH@lT0{-{>&#I!y(G*sZJ4&a@Bw^(#{&!=XEl4 zqR;pdbMHM{rE3|I`Z8T5)PNvRn^oaf&*WZhoLGT2OE3J^-_WV^{>?qh^^rXh^o#jkjBT0CSTYn4-f{xYLbG8pJE%yoZ+oT< zIQu4*RL4oO{8k+UwA7clIMW{E1G7~Cx;ae{E1t7`nyf7N<#95PCDB+oFMN!W@fEjnvd*G0s(>t9%>)^NI&O#=~-5_sGG}vo=0AKXjuhyi>#{C_BMTuyc|gdJ-AtUW{0Zmet%H* z;8Bm+u3ML!XD*QXLvZ0KMrY4Kn+?uB20SO9IArA3B1Nc)F3E$>GFF#5d#t`9Y@iN_ zoLl41Io=+9(RxKbmb%;bAMnJCp7sm7WTLo2&`S}T4Q`v%NI zs)>;fiJ#ZD^y%qLUYXT||Ke6M0X0#RT^2dcIJw7M( z!>~dRed~a6a&LO;$R2{SKx~c#j~XN%hLPSl>j+gUQrPv~p;D7blw&0~E~zq`@1+q0 zxbcP~z`4=Oe_8}7OZdE1I^3*npH+Z%^HK2p^XEYMAZ$%4a7Q_XSev^SPwS7v%O87$ zXZsv(G;Hh0$s_IGhqzdWPDgs&fsC6B-M$M9YTV<%qhxp0{L32?a?zjsbtjud&>pV+ zH6OG6W-6Z!_dF%KTO9JKM8$sWAdC&k(9Y{|Qu*q~Cl@}6L%his?+WwEUWaLCdl0Mk zRjj?KHzDK1L|&5(WGqsP^Peo2=@Z~D3N^_m7fsLID!mIOQj@6HtopKp0x{24d%wY= zUrW!;BNvsrd?94ykMf3Zchf^w2ZV|9*__M$R|Wst0CX+lGBl~_{WQ2WaFeUpZtrO_ zM|$@nQ5ULQO$ZmzV7Urf=RygC6FXG3G|f&~vgNP4%Y1I??gH{}$La$S)_kDzJ*I%P z1mllfDW2=gkBKWh|FGqrgxA5uK)ON5TVb7@-Iy zQ1X2)!>-F6!9YVdvNGomRSNCPTvAQ^7`W6T>HXUz!ax$xqr?z?=eC@^OwWJT3v@iu z50~;$1*rCWSn7BIQpHGwEmT->F?yXzN_rpR0f4Jm89xe-j#Y$^hxNC&ct%&RQbj|7 
zV=tesLrpP9(<0UX%_lDrfmVVVemic8E0Sqyxs(JMfwLE>m_$(KygYGjUPTV2I4m(QS_{Y7I_Nof?hgY*^U^)D5L2Ze4>vT9=C?L49R& zN_c+zWI=oDTc!~i3=Qk=q0zw+fXCKzqq&sMzdr^MzJt@H#r}bNeRX%VtMOUh;ME5m19x@=}R>i`V3Xe+ua24lN3h%ei zphvlX6=7RyI!*<6AwD6wD=z-kizLtvXaD2XiZd{k?^D5+Rb5IZU38+%B>s~I|6^s0 zpc;9t{xkyCQPg!sZEyU90OvI8f2?#1w22)mHY0+w_m>2zb zv73@ZxPC+~yll$*2%$hH%>UnW5e}lQ%Tj{NoJwqOa499QRN-0ps;?t+1#Fc$)ZsKg|+yXXd`5c=Z#Cz;L@bnn!xRA@+Zajv4XZplLUHX(K3z zd+z-faHip4N~!(#TTgW%spe$i?Qf=oOOBRaT+9H5|M9#t2vN3^cI44Xat1m==6Z5MI`)klJh!PqI{L`|DU<;@+7KS}7}5b@YPB3nPycxXnA#OTuOZj=jIdNhU6-WImk{g* z!dJjw$Ggk`b)Qg&)n$ z%9b{-U+J#-f+eL|dcbbh2lO?`%{i-HVZ90Tb*H8j>4nfa7Ve@mrx5>8bseZO7ph(PqhRVqAiZ@*^x5iYYXgFI+mNdSd++tq=CSqtS<@W55;? zt)igD(1&>nXqjl&HE6K1>+9FV*7&16WpTpk!*6)&z@?;flzE{TUF4E zt6ZYGvfXcMsP^n>Ntj0Rw`_FI z1litLd7l)WX7jPA+t(};FkxWDXAEY+qu@+ve$cJVjBv7m*L)6o;|3X-0Q&=WOduos z`)A*zaH)5+zZEEiL_T`4KwsN@#qM$T{TZS%egUI&8mw3{d80M@O;2LD7s`!0uFY#m z&}ZJAmlrVZQ;~Yy_*kz);xY-Z?XGxwM!MzuqP)sCSN-TdFA^c$Ep{#qup7ucP~((1 zB6b=aJ!u8$>e!n4qpQM$E_ArO(wyu?s?)iCRDx}fSJFec|(T+cJo5)0;;0D;w{=O=??U9I2 z8u~h2gRK3b5DUB2Y8uBEOy>PMFkRJd+EhK^OUK<#$%^wyu0>38{-_RX9G6!~ z6%D*y@$43Nc(k=P5P9gnH`rlrKEdX79K{TYp$Z(F&&!kbaw~P47d-a=!>!xv4?nsv zIz5rge{TnD%|e3SV6%(9z5eO+*_mR@GiuulPBCPzhIdBCxyd=2j*6kP9 zi^}^^e4y|8bo_SnfzvkUVx$zomlF2#{&c_)-^_JNGU!4sWOd_0trr81cIfYG>?{(E zw`23DJjCAm@*bym($pyi8x40@ZeB}Viou^oWTO$6yht@*W;$E6FH4T8-n>~no?E2V za5$yXvgph$EC_*pyxrYlJS?%rt$HQE-sPd5#jI}ZRl7(A92rtVTTA)B4jSswqmuz% zeA>}i^cH)UZ8}-}*x$W@upPanE37i%pZ~>#$Gk%q-snsMq;Ua{eLk)T%PPj2?NCO# zpOQ-DjiK3!935{2*zdp_UJcSrZVzMp`9MshF&TY}?7j{!1>GNFM2AD$=>mzq zn@Zgs<|ki;;C>DU2G$jm#{TLB{oX6>OZlan?(7QDluAd}+rGhR^u@Lu zmMNv#B`!WI5(XVno2;d(>;+^)=b$RAK)Ky=$Km ztyl|}0tU8kcXgwCm_EtVC$0Fz_D89BlI+PYB;fX$PKNPpQ;XyN`{ic6+i5E4l0@jg z?wfR~M%R>@Gp!Xq_P0fni#(;zogq%p(rkM{sjs9ZEAcM5F2)vF>9fi2d2LUgpinY$ z>A7MNz2wf>DW3U@gQ%4S)PqHxUHsJ_3tm$7Rrnh^AaW zI40hq{L?C(c#rnBHc2en$6I}i~BStDhB`}R`}&ce%fHOJy!t~O>1 zuSZe^y*S4qPXLks3Ft47r*6EA=sJCVT z%j4FWrcOr&)4lSK>9RqLWH*}9efk+GY!Wb5Zo(>a-WVQeq2!WM7bh`kOj_J#zX58V z_8;F|ggDPzA(D;V&HR}i*2y|WnpYp)t~ScUkVC%PY54Bq>WEa1ikJvOn<(iD-`}+~ zs(>(3fk!0~jwlvBc}pg?l(fPF!A9)2h3^eJH<_ToeOqKpsk;?M%=M4=THU*!RJxr% zpDOCb=|t;|*~h>MVlrT8}i2%?-@Kdl8>jq4zV|*$c+eS? 
zZ>=^5I9&hxdrRFeL!WFfbDbPYwXKY2z3@FzQd5=Re>k;4_AZ(8MR8pW7z=UaeRplMgspPV8Gb3oM&i^RxQ97`h?2haEbPxFX+k<>FiC(Vc=s)3C=mC8uKEi_qOTmRDJI`?PB~*utg1odA~VTbc%s=02HZMVH#sn`ZHR-e{kS#zuzCAy>1aar2*LY zoxGq;B4?i;#F+M_dk=v^R8G(@h)e&Li4S$X$D!gEV+We!{=^y5UAYR=(A`y|?%fZV z0n!B%PF#RF0_?ZALG!$D?th3CL)O&n&K~JSdj!>w^nV{#8B+W0o^wB~tvwdPS=fG# z(4rEWUucZG&t7=(O;8~w@0a-p|L`<)S z$zto$mc!a~4ph4Qx`$t;iplO6c*gi0HS7sLa1Dcf!)>}%VG8$eY7P!>RundY!e~R| z(WjEIW}Q%cv>ZyqLZ)1xzw!BcR3aaJk17OkG;6MP}F`L7PCB$XY(iBZXm8<_^27=T$ zNuo7>RA1SW+7O1VZZxy9wK5vA=7P)d<*J!-MN@>Bs5vtSYAUgcloa1n6N)EquU5T$ z$gV6-GP@jZay&*#3y?#l&$Q=tamQKz{6*&Azcq?zzP2QR&B9{y|{7y1gmb4k1J@7t#=kI^O;H-nZLTk9b;P9J3 zx^$-Ke<_|bM zvJeOy^d3NygR4Qhfxkl~J%ou_=)(Qc6uaXcDYWt{Y#neLbgEhAUQE*&71)zR^AHah z{vu{D`;^&j*s_r;b;!5*MbkS6+$uz0H9I#7MePQI-FjB>E4i z1o@F+_XDBsxTMUEg$*KLTt}6N#58?nD$+2}y#uqpopW|iPGONZqaF#i9kh$;3)_5-#Y^;#$E~J9)E2@*5S-QDvkSM z6E^{;E+e6N^9$|ppr$A?vFwD?KtOu2zpse54T2hppr-NIy!^#jgS}m*f<9xQaBjP8 z+H`h?*Q$kGJUN(d)bk%xGjY&Qq0SgbrTHBhfLl>g;&ip}wyn!GfRL98+q3x0u-FzO)P#PDBKCb%*$-Njpe znnhY!Jux2exk7*&3=6Ws5c&iB4Zr{`H5V9z6JW1X{`oZq>?(98Q^0}d4Y9kJ}+u8oLD+gs1WB@-t`FJaD7$xfC_rtl*LR;FM; z)@Un;8ZiG2);XIME+rU}VbPj)$9y0wXtO0+{lYJSqx}+5kPfqDGMcO1OU#ZBryD)| zZ5@LOzangXPr0OKzFYV;W4|f-VD#8xo)*sKmVEV51r~J=2bQ z7@!sbGuMgxxARtYqlvq<^(*t}u=1Tde57y1{bD|>6bv)P#*l8b%4qhEp{AOr@|qPIh5-`h8+eZIFb!gasqp7asjjGrE-Mks2yF-(V-ho{!4nj_^%OL?+q%cn*J0k zm{3(K7l-lxE-zIjQSWU4FsVl4IhQRzBXvimNHx*oOl`O?uu?speK;fy)~Hbt!r-9i z@~TP#8YShUs+}$c^cP5F+sA7THmi%oPzz1H| zBTYF2v@995R8_`@8`#CkMogbd`~p;J5cB~-*p1#=41-p0wjZlA#3AEz(l6rboc*>~EE>lFpN1o%<<2je~N)!LZv)e51L@A&)#n zTEP|n+I+XheI2!z0v(yylKw`Y!Lf3m73wBRN|*k&(leeU&*Cz2y5E>6f?%Q+axp+5I_9STRF0trP6hCQAO`3lARy3RYTGAP zQ|);H&&5Uj;MD|d$CP>8v#dL1AEIG5rzf-Z#W!Y)v=DYz(1d)jVxigY5p@`McvOi{ zCL*r;e^{tze_q;hgvcEZ6AJ@QR>tpZF~8$U?l7{FDd~%gba1y@VpfQ)$LQl}Ze2eZ~@Y@*>3gAn}AU5p#Hn3sEdDq^a`Z6*&S(cK!mwf;bI7Pa4(Kdb8o}xMYfW zWPaV6hiK<5u|Wr@Eo6w9p4>Sk6ZB*cF|ucZ>h>R%9t^sI@AKati^ojI^>EZije!#jh1db=Ya#JDd<%cI-&{Hw(zwLI ztZ}B3AD?XbI8;^9;!{I&p-jwg6&IF|?s}2px!~H!3Oxu^K)b(qj-`o$l4J40>7K6X z4Q|sJvbQ&HzAk(}X|ADrKYVch6$fFmpsCsYoNWdtjq@2DS2ogq{LzU{qd zIY<|MPSX*=n<+IdVDlYHBakNVfn@~#9%4V_nX+xNOsxe=FTFtVtLH{g6$aC6ZCg@1 z7~+HYMtfQ!a#81AfbB?4xd%hx5~0c^4N-5#6y$R+>FYj+yRRzW#1ABs*z1e(;s)MT zzVx{0O0L$?jFtf-0WLj`{%^rZ8PfRl!T)N~#c}ID?+VV0H0k>OPAS-`R}iaYV3v#WlSYOxQz{VHGr11lyAp{;M63A?3B{ zcnHqeWQp|%3AmqYF(1B6I}nD%l7|k?Pb<%oZkwWr)Hj~3oeE5^rbczs(lt0xJ>b%9 zOiN?|(OXdY%{W%Y!TxUF0LG!hf^Re#!*EV$QxB@y*N0Zn^79T9m_Gi7G)J#&L?D}&kuW}rp zV?u2xA}gq*k8Ce^2(wRil(l$3(AbtIfj7Zw{)_Dczq1SUcpTel{6W1QCV%6KAgLK) z{1L&YbTpCi+~&vB?&2gcl18&qf~Wi}=1%TIu3b;1zUJe+WZ^U1wJaIxEP#ZW&5-sZ|WRIRN`OM6-jvQ8={$zd1l=^hKKj$O zzQUh~a(DU;zT*+8MdAQMv^Zv?d~5 zfw-uLW6qo?^&++RV0uBqfgr?c-RNz5*T!mWvZ1GV)KMdQ$h~d>WhgI5Dwx;n|(W(`H@akCvCC z=btDEBV4&6>vs9AE)1z04xEQ!+@J8uzAbxMJ~5Pgziwpl^{)Gc5kF`gA;I!S?6E(J zr|rg~+F7+C@4~nPoG<5l2uD(E5|aM=xiG=^{2 zdl47R{%xHqtacWLB45?*+D>f<7g$Y!ho*{-+N0inBzWqN*2WT1Rlq zhq3luiwFLfo3%pYdj^Qj5B!dPP@tGBg&(6e~VLu9_>%!{|6+Dab zr<|n9I_|$ zCN})}7>a5iq^gc>T*7A`Zu}VRX6&gl4AJ4!K_S!^o07x1zAc1rMNE5)L6Ol~``V-^ z=Y{eevwem{rJJoUJ|V(o+Ywp_Z|wLG5`_2(bN&_*b@@WYaAs;}#@Wf>y=9SbkXX+% z+wBYyLOEl@zP=I`<yLpVns^l_` zk457TNp|ZI7Y7}b*0S0IyN}s&-`^YF$q`A&pE$hW_4voF=^=_IF`2K9K1%A@&hKST zj_xur;(A!p^VRRMKKOj6+LupZy(6vl~N4<-#lr`3srl`3US4QH zIA37oDN(j0`BJDd(9_XXm;8v-)_i&1-1rfV9@1!HDZ`#RbhYU)PV8eGrk`V+gCB1s zCd*TpC^JcTujcn#o!=b0)+8I+c8)CPML7p$TqnE>y!WIDjpJE4zDIv~G-4^NE87{r zl{w@aR4NveFe!>(p_d%t^UB@^yKgD?Od_u`bvePi3U5_@>*v_LRjFvAavqK=E8>!o znWq=@Sd$VJ#8^ns2IudeDrT@|gkM$vlJW~RdYAm35Mr#7cMjQ_^(SRbAFmQ^lj*1& 
zjBDLnky4d88_HXMg=sY}ia+b6UOb)hPPp_0C)&c=()vaO$c=sWgzEgnNxA7Vf#8egjD4JOB#oq-PS;~+ zbF-Fy50&XB1^4J|*IElx`qd;2q$}WaO{m4jH!mBMrMdGfaNi7uGv3qXqQA>E+&EQq z66MZ#Da7B3!W~1h_sE&S?;))o(rj;Qsggnu38{I`nIHIe-t(l(9!402)5fV-Ay$O9 zuf&hl|4G9TAzVAlJ?~02s|@+4Biz#~fme)Enb-JmRoZ103(Vlpf1?95OwbF0k(q9< zDin=i+Z%3>zz90BChBr(i)=7S4@X5(`)PE_-ibWwDp?bURcW5y0Q?vFo)bcIJ5&fMb9_%@IW{C4Y2+yF#y*v0j|A7-Md!g zv%x|}lNd|*Rn6?Dfp`!X@pFCFo6i!eL$rJr*o%_|2@gAIZkGEycGa=^_t#@9wUoUtVq>oEDLGpIz1En76o z(kZo;tNzZAu2{ottTXKZdeTINw&tCC28PS?Ys_pnpC!M=!e<}<+BNaM!9|~NIK2_n zVZiL()P|RmaLE(Wcc-Twy`H8#hkbpOsV|rBwTO!Z&YrEL$Yn5yAFWIAqSWW(K`$ZZ zaJ(LSh6YBK|9N@n#EW~||KdM43$69an9w_MAgt+S>M&nUGI@E!E5wT1{ZtqsdBW_< zA7kY~R%0j*Yo`HrErdO@wealG9l!#QJvJ3FdlO*?P*vvaIr44#O(NANIH+NBH9HBF z#bXEWIKxMTc{Dj+Hv=ugLi21++jw1%9R<`zTVh(;erCiXEi@|Eh^eF9oiBlM%`I4_ z0;Gl7ukvvZ^fmr)l|K4IkYn#6p9gp7#1FNXg=t?gUM)H)vW+uJUC6s5d^XNQzr$g3 zJjnF$92Wy^Mq@L`dQT{`re9SAhkCsdAhYDN1Fj7w?BYC!sd!eh9$R~^)l=Ho#8Idc z`CDniap3e1w$CG{NvyeE%fk_QCmuc!JJ=Ll5>d@JlN%1Ilcn@EM%rPmvAdXior9G*QRx0(%>&5GijK357kgm9srwK#4@Q5j~u>ZbFxkyDS1yM1cK$ zLK@<*SqN`mE?!A{)}QmfF}!&r`2SV+ol#A$TbOoKngY^6rK6D&gH%zfbQDmcQlx`G z=)E1O(nLC;3J5uhA_0*q)er;Hl`0@zq(~9zFgu=m&s@))HEYe9`74{u``&lI zyFB~Zdmn&F_sRIIEYfJ1`<;s%kA^s2OO&tg${I!%iIik>4mG%I5H1`)T$m@Sr$z)vRCI^A&)>8-5Z) z0!7v^xQXAv-h}v#hZEB{UZT>hmTkW27%C(2;70?yl-l|C@C*U+lFvMYWQEMVFEc!v zr4@R5>Nn^Q;N@{c(k=G+eQC_@j)s)XCp5JAAfC%DFI0ams3F5h z%wm7H?6V;OUTa111GMd-n09p?OJvRUeWR(jw-;->JaZy~9d^H5i^94x{nFtE2Z!P` zWRCnxpF47(`CKWzt*y?wy4CPpfgk-y;_TVCB4O$#DM#vmmy_BUF}@MD&wvmRuvt}m zhA|qlUi@UN4q`&$IQu|tn2{21aGb8nL!#;SqC<^3P?E-R8q)xHavA6YBDyTRwiFIf_(EB`-Fp+CpT$#gN~UW2POW@afiM>aLZ)k% zLI#zug)0vpEqn9$-ryzu=NIVCejWeu z)L>xy#*E&^o|o00o6%mO-1O$;wAXyah~wt^jhe;zJuLyrkV8Z_xF&#1^fN*tOHN`WfgRvG1ohf{zgiIqNO_x z@cAAY^w#6;9ryPV-0kk9k}0C&9#jUzyzD~87>ko?TJ`RB?1N2C>_SaWcEB32!PxO| z#*D9esj9c1fwne{BI{=$g@XF{>16bq(UmPy8)?!Rl0J9trixzX1g2X!aU3`}T?1W# zCRcTwk7N5KfkaG!pO=VUU44xbKN+pOwBh?S&nhlG6)KxuKQe}&I96ur_4ZJp>PNks z9bF%Pu}8}=o)?4xL+0VZ{8%t9D^F)`7$=nn6O|^8D%#M-q9XLL<^Us zSTxYpfMIJ2IUHaQKrV_th5#f;;DZ9<4O{lwg*_}OUm5Na{EX!MQ17@nc^+OIq;{bp zN3Roj@WGG|j_tWc7pJScQx81djwdWTj?h=+bU z-whr}W#t;ZMMV{u%>$q#JemPyTu|a-UGN~bwv=$EP-O?NEPBfLo)~=84Yir-F7%z& z3|Z~h?s)S71&BFR?iz4@Klc4GD*;FIp>i&Cb{QXwOUjhU5WbG8w=A5fP(=Yt-C&6kNH(Qg)DYCdL+JFA{pob&KuWF%^ec zXg?)m*9|v|!v(|Wcd-YzKuY;JV@6jtfDK~Omx9+wzZ3-8l>;*8ebu}6Z>md@Qk#zt zaGl(KaAxDO57MoN-M~PIQ14fDd$Y@hoS}B>F(9{13Fi)j^zebkGrVGnW4l2qk`DVN zy@qDqyBVHqX$PiGucXt{sHxB+0|Q)u6&E`@ld~0|A^D-%copP3%^IJV3CtNP0)R@+ za=iEimO*E!3VF9&*F zT0331gFllNeBunHs){^gu`vHMkJXiFFHBY&JAb@fkxr>X?RSr)ti$R_#ar=NTy&h% zd2h8ldp@do`ckD0$!TFYZN*3GLNAD$lns&qkY1VoCb7L)@Wa&&Da*)}sF8!BhBmJL zH$>AgpWBK**><>QbXfnv@4tR|vr^bJoM}1YNqz z{keSQR_eF5<>eCXTpQLZStq7bi7i+=ghX>Yd|dfjsGzX{$=<{7h`Llp{;sQ-LrqOq z%|fIi_eqiZ@l~Y7Ce?=&!OlNOyW-7KA6pFxUChfmG zmbHUqyQc`G$QwspV~x@8yKdT|YbTYfYY<3C2FLhMXB~@)acO)M;|x6DHt~;Urg6sT zck>=2jnyHM50!GOHwuh(T*jCmjGm^Hy+tct4PTI-w(7f`}G+k%1Ijp$0D*GnB&b}mbLZTN1g z2ZRc@#Mm&q8dy(8>l-~?k*6FtGh?1X!K8+qIGrB*;pXMp9up6V@>~7C*B+HC^UymT z@%HIJMPI935vHQ0`Qj)ARh>LzPE87DS=&B}$Vk*B?~VzRNG(Jp<8jfBK?N>Oq6$v5 z%LddR-Z~))l&(|;A8R;RJcXcxRI6(z*{MZt1UQJt_|EQ$_Kk$;9FJ)aBTjLjjOp%m ziLLQtnz`>VesRr@x~lO3Z-&-NvrD_G3g(M{(WL%xkj-v|-yn1KHxkks|8`&dl$NzT*bAD6iG^n|R6AH89`)AlJh!Sve? 
zELx*`;!A;WNx+CvT=*gPs)NQ!r$xd;8&TpPJBPh9r5851S_<#ND1h_Oq{jGr0Jhi2 z^O2&=qgD)Yb|o7{Snp$=Zp+Lsd70SmH!qxWxs1JK@ykB#y>*J$s7v<_z{uMGEum-T zfpr}xZU%*wJJw*UWCKo@Ot%qDQ~6p~OJ$bcuN{qk1hX+Pub673!$3^6_4(fpG1)lS zkLs=shxI0h5L9q1bKk9G7$y>}viO*fJ{Coy9uAK4CaNv15KoT+8w!eip_KJ|J726y zhhu&yJRsji&G^`QIoM_aEHav^QRQOCV9+F%$Z2BkxDTe1wM_D1HBLk-!yScZ$^1C8 zxAJONn2@K72Pw_m>NiW5@;}swa;h3K-zeE1Z_Oir&+}p3;)w^NCbD~3naD&#p9Ry+ImxK7!0O%Q6=1lsccZ+7 z!POy|$2H$9=yx4=FD@>bsq&09mf4U1O?8K@z0)f@aak!59hkd6)BlHwd~R;voI5Dm z`ervaC!gvcZXWo6k&@SCsBW}RDh%{--WpmHxq+gS>$QJ@uFAY^u`*NL(eT`j4f`jq zB#6aj${FmqfKUcm3Ti#aL=QA7q|0pbsR`bCal5_grTDmO#h=n{amH`UW6Y8DOX4H< z2m`BVQ@nuiyk7(w6?la|Im2hc+@D|y2S2-WA3yM}d$)}F64jjNzU;Sbzk5~7h;waw zCQH}vhe+)^1hzRFu)uGLjv<@+NHKRAi_9CD0T2@1(2~a4b}7GgCfVo_h7uW%;W~=;l`6+PiVz1zTfcR)&Hjs;)amR>RtrA#JE_ z%3c~>f#Jr)B-bnm{rv8e^fSql)mEE)v@x|?oP3l=4osY4C|=ln$*i3EpvEY_ErmqN zb#UG!Kia*fbfmlgHVxP{ZV}Y`0%4hJX7en=k)sd%^EI;SqouDjCCpr8raUaU{_Xt7 zj6y!fXy@%O``t&DY!YS2KfaM7YHb-&B`iIoA2G^%?ByznQ}oG zmWC2qZo#xYR<|<`Y~&d?SUC*>D5l!`{5Wy5dxz9 zQfOKQER&Ypjbo!rP6#zYXXh2cbAp684W9A@@KY)rYmT*~#@Jb|ad_m;UP2fdGiFt; z$#Qxv?gwqT@@=KP*n5XBU5M!#ZH*QeC$u?QCi&FV%~h=6*GpW=w8$@(5#02cgfqI?4FfC}T7oNBIHSJJnSP|J!r+JLPT${)%(qUs= z&t!ho?ffvB8=L49?=SXas@OQJp@+AV%-Wt~a+JuZYenaDim-qWBJ1|uk0DW`9$W}T zZGDQZwRr-v#7@X#J4X0knRF3DVnSDEk9;GRHI!l#Xr9ny0JPi^{zPg%elp4Mh0)$g z+o9*-4AZ09#K|8R2X3x--QHX#`N#gz+TqMN<$t|rJSzBBdD@<{C=OJZh%Htbt zSGK9^>yCyQZShJw?D42|m#2fu0o7a7KR>teVfxQM4YZmnXUpiAS;tnBvv37(qw9BN zb)NA=ms;UJq+d+-sac+uFcd=6Ag+)_C8h3e*zgm0SLj8t7elW@zU4a5F>^K)yAd&% zVLxd#K?K}($Z`?AfFVy}_P}p<567CtbJB&eSB}X=l>44xc;#!n$ zs2;1tH0t=x9c?_g11h@g^Uq_KFmJI@phOuMk{2_!_CmqVP#AeV=y#bmD;cclvHrba z*C6PYeP8h4xJgbRnICh!=$)ykf0jn~(@{I>+B}B49&^gQ_8Jq;KIkeN%A}?14{I_b z!Dsk--b1;vA-eP(R?*u0z+%r9?=?LH5w6e4Rv3kH;KBKpy-QM%W_(HD6NQIQCJp{v%zL%xuuwi7nV^Af zGB?ViP17XCsqt0Bxt>$f9lDLE@H+frxG0>oOc{YLb{0#jui*@=-`C5<_ILM#U)9P1 zS2Y^HsDXFpoS*}08;NbTBwCi4fJa%Y{meWr`+=-~3u=Q9ACqe%6(2&BH>di!YFg5- z!bgu*!)adYcH>KvwY^k{;RbKGs(6rN-NhOEVa62#ut<}qWG@Gt-K031Zl{XR2F)*u zD+KZ&TpO+5&sW?(KQT4sB6(Pe7g`o5(%>SawsW}nN(MR1Q?}V~Ev+9UU9gf?Lfi^L z2G%yF=GKq1cV)rP`g*_iDvECEN=mgpm>}Nsh2s!5JgLZjbloxGbG2{RGX^mSjz%4{ zPV?NxBpRF=Rh%xmQc7qsKCGrOzyc}L7sD$eqj(PcZq#*ItBEL2=6@^Y={icJYb2#5 zxA$aJk(j^E62_J`z&~it11&eUY9Y#b{3J6g7b^`t)uEyUjWp=)GNogg*s3HL^2?(V z!CsN3z;zQ86?Xh!w30>h53GQuSuxgP#Zwp#PX{U^9J)0*O5vG9_w&^zrs-eB810|Y zi&$uLKeDyS%*lGMs5W`HPzY?9N}dkcTUKk5mxH@Q!(W-(oeBoc_l#+ita+L}MHKzz zPWH0?V;@f=!@8A}xj3iQ;9N20F}Q~Xk>DF!n@y2<>(qoT$eE3GGPA&Z0Rz(ejx!K@ zfip`;P9)^5Z}&q7A0HEsKG>seCbj($;Zd*Ukl4Dh_O{Us2}6tlCvB%=3L4R%2vUX{ zY@LK=n?gibJq-1a)gmzatg~%#4~Y;~LFyD{!@$ko7tBMKVh&QQ)xttgd?1ze`L57Z zR!M!W=nJ>l_*U9CPlM=G3684kIP)M|PKX~RrFJfH%$aM{Nu%-ISUYKQ2oPQLoI3^B z3iVFc3{d?$mxrEq3j)=JWPX5b$2=gMa`KW&AB~>`(Y@{I8%1S&i!nJaBO_Ah zICN(>T;KK=Qp!&GfYNMB_l+bQkDZ~Rr(J?kuBlX-&>K)hb2wU&OYU4^16_=gq^?_f zOE@tHv{*UgKZ4Em$6nP&NQll2R2@-&5JnwyW-syS{-F~=8`s4^lS~;oIBU`JCgW>m zbPHC@gA-;#shOzq&>2LSJcWQ}P5Hr?;f>=c%|=)!FMil}bD0N(N#~2P`7_W?m0h!U z!;hJRWvcVLZ{%=f1{!Q5h+UjM`dxy}#S}PS4%^GT-|lOfrW%ws#OYYJrfJ{R<#xkq z74L4cNqGE3#V$F4%Y6HKMHE%?pr-Ohj@pzyVyZZyVaDA_5{4k;F@WM|tID2{Vr&!v z0W*lONd=S>0XAa65&E`}UzJwH>F|snLkK0OkzCWFN$fQIETD?jRoP%)Cp_|C{`0NX zC2pCuZh*oYzE*0PI02haiGeX0={AJ`2IWR)(4Ha>`_xd8^jx3aB?N>y6`1i=gQUQm zExDfcnc< zEe$ZV@x1ZJ4AIeKXh#(cu zB`OMr^iEGVIQ`uD^y7j4%xj8czX2qj#>pR~A+>WEP2;O$u9fBKb8`lBG!)>( zfKAP}$pJNQjwA_ZdXjw>%Ja#i?QKnfPqNU42nd|BELQPMSMg+z92_UiIFJ7HETW?R z5o8mnqru3`!j-6&lh81`vVs9-Wt=D=7>AS?~AqiWpGtRn53)Uo1*mu&{u!ux8vLnK1 z*o(bn%Xm5obRJCSm~9ygy(_B*LZp>U3G%sw7lmlk4E)IM341j`B*@&*bFhZs*61Ng zw!*SR2>794^jxKzb){Bl%luhsaawgmox1YNQNGL 
zhY19<)WDsiRJ5YE@49dk{dc5Z$?hQ(4(@1VNm6L`hAVNWzmz2t>w6r3qOt+Sz*<;9 zB&L?H(TMywa?RF-J3dcG^?3|OJ#=yDwrm|65z39)% z!D8u)2+n)qyT3r(j_bTM%mdeI3R!6d3y27fow7fkK-TJq;zz(nPB5J^Vfi{cDzAPq z9+*VxK3QW`d+TK~+roP)0ovldTnA3?2<+QOeOOgdPVXNPJS`Y#vzg~&Lrv}h znDet|h23-G@Bsyds7FyEem(5at!g9H%{sO1p0P05F;tO=0$>VEr{m%$eGyGKUvNp| z5*qSAn;@o4uRsXyU?ailko5W?Qi^+C1U?zk%$?;lRL@5OY(mFQ4K!gESXY4lR3Sb> z0C?DqA{7nJqX9k&@_P%mmx{YJ(bMlX;?5g7bw5opymfG3j_K3~<2rLTO!=`q8>$~a z7Y8V7&oHp_a&tyHIqtQ9z*<>9$*SHyz^+B1?>yAh3JxypDZBUlDX<{Ozy~Ldd`2Y^ z!!HnkqKeoq$V5ay)j|C8+%Smg>9{laa#!TfQy6(?>LJ4i$MZ~QKr@@cqeIg+6Xxp0 z5A*=Gg1{$%+>vs_o?X5fl2`OJfu*FyQ`~i^X}NUvhFa&R;il1EV_wck6Icug>;9DT zeYh6iJt^PohJd&a4k}B&4TMJx50bO^GqlW9`RL3A@;dAs! z_abg$p8!h$jg9&2bsoaeBr!H|U430mAN7mm6XQ2z_yJq{7?jq)w{zf)$O=H%RSZ$r z^GEDCB`-rx%Q#-4OhOQ*+w-e(Gp080fCFNb6$2=bVz0d~dg>-(3>*Nk=Q2CFVd%F@ z-e4+NI9ZWIl!RG4m`l7=q7Z?eqJE_)~KO5upufK;? zf-z$7X*V`9zW%Z`n%#frCa4+RTQ&Q-gI%n&yNCK=bnagUK?vejuPrTr<#GX4UGJx@ zRacsHR>QU51Lrfe8+Y{ZH5hKOnpTE{!-DufScK}k;&yui(||~K^tQY*+CI!-f8j%l zVUzW};LX(gAhN^SP?61`ja7C;UA;Ez<-gofcDvYAtWpyPfM*H+2p1g<b~7=YyU7l58EGvXYrYSbmDJId_o;7Qqv`DV{9b`}`!eh?sa5z%RY_3j zMI502aL(+8-_VD36aGu6PVY+|Y@M#%bR&bvuP=!`en^~|`F4*hj{$)AQ?}wow(S`b z=`ep*DRgW|HzFW6MRDm8A>g{kllRxv@Fp#q!OGVp-51@s#>e$IBo#veIqxNIUJx&a z6-CO(p7!uxP?uTx+6caUekqi00~nG`Aqrh>(}p3sQ}0AU z{3V>fh#_HI?UV9gGGu5n={%gz;Gk;|IOfcdjDz2{({9dQk=OO(0MZs9x8svdK`wE^V2YL-6+KQUi&iuAcN*bGP38vgF&985lsC2YVk~y zJ?sXhVJ{v4In3LYZW!Sq=@bRQ(Ia-xXe!NKK>;C?-pndAx+1+ya5%sG^@};X-H8_C zanfmj$TFgAGe zJs6Da?9=Dma&6{qpDN%s-jz~Eqp0HbuRqMUd0|lY01h}PuAz*1NTi)$r-o`S$Rv%( zox?sYoZ{gQ3L>8#WI}!{)Q4Rt^NUkzx#jp$XUbvm`~5K8cU^q74gou#eEuwu3bj}j zcSi=avqHDQ)3Hno+xqhJLFgK%y`UF0yL^8&!K`7!bg;-Pgxe8 z_x`|NKat1KUs1upkXU5B0VCbjD-s|_25K=Ir9|jDHNnJyjJ07+e_6-{7(L{`pu;WR z^>rpL_vI;&VaWCY){1UYkkfkW27`kcFBr;!j{<%#Sp|TqjeFLeVvJ8AK{#S9p{ro* z!GwqO^@uh=GwMmNay*;$gvh};;QOG~nW>|`EO}$g98aF_!94x;{w%<2{|d=)yNHTBXew^aof+0UX{^jFtncEC^*(Gz5CEc=m&%x0uRn;+Z%=KV^O`h= zEKs`ltUzPX!M=I-DIo(zhz_(kB^9B}cHqNpQ0;%H+dRLb9-vLj`6{_NvA{{fusYtH ztG;63B6KP_!OSVYpNtTuiG}5ze+6mBE!(gy z;H^^Y_zZVv0Kv8Mwb66o5asV|8QBN`f#D-mR zku|A!IYc8vG(H^M$n6Sm$ofKk1iHrZa%#Q_3+3TwaL$GcwdCvfX_bXNgK95x!np{w zd`JBF0m$D$Y%|&X$=qW49(WCq8TqEIPahq;WMSbl)HF*vFYwOvY4__=RHvR#QvI$` z9EqpGYa>A;LwWMK7#X5X>O%>*&zcLYh_8E;HVT^B#E+EK?tZ)c@g_%O@g|!zw$_Ro z8fC`3r}Wr8pZ!w~F;c!Kcgr|{{x4)^n1aAbZTNSeK;M0KtPE9d$p7lQ2i@k%&;7%x z|8)g}Q@{I+e_ggP;&(UVuV!`nO8?ax5W0F6{4ZKo9;mI9_q*{(2d~)6VP}|x`i4hp4ww{~!kJT`6 zLCNmZwMxz=_r6@bCUjmpIeq)UzdL^Z>q@=sY^VRw_8+zVAdsdlAQA`{;)clD;*@$y zm9^mCU-#?F18pB61A+qdrPH~RE=k0qX1u3k%#CK6pP_SgApR1stU~HqH+~I(uf#0-m zBOFmsFs0sziz~^9izAdAY)#)-nV_IZzyGL#uBqDjC|z4c#@rt+jnVj?s!)0>~i-awV0-n%8+IbJqm;{6T-)`y7x!LMMsCjZ_xLq z-m3;u@=3>8+2P9Zy7@!82*c_fzgE#y%BX__#4I|B-tUC2eF)-Qr12=us}L{p?YIaU z{yWXjty&g3#jQjZI-kmA!d68Z?x3)Rx5t`5GG|~%#ZaF|%L7fQUyGh2;!!figLtBt z7cmeK@7$#IW@%`HTv{lX(9?^X?M*T5_T9AZXzGT_c6>6dq?{@#`7?+6e~Apl{yv!x zloYkPYhOddP4_Chuj4_mBV`aH?Ae1=M|{qvd++wkU|43ARw`ar!o6|!;^oe(F1hN-R@Vn#I<`sg4JGU632#Av=MlYMjSP4`f-GX3lra03-`GLYGfE*(;pqc{}^Rm zSmMIPe-2j~B?QLM$Tw=%a|qMuEnTQ5ljMJGFZ?kL&z-$%sOl~n&_;m$JNTYTPh`9F z!4lqx`y$aqL$v*d*RB{277yY0j=iOZ5y53{ItID`;*ORGT!$msC-T8a z7)%de1osr5iZTlOG_b%lFcW2?8OwY`gCMFf`I(aYvR^*Vsy{>2JzqvOy%>M<&UuXC zkQ>}IY{#BJao!U}G7^g{PGSjRX1V;i7(z)*T1mX0?;~n3$U`oc26l-tgGsjEUL2cU zBnU1%{uI6&VwcvhP$Q@;pmD4!_>|>t#ZUM`ax%M5F;Vr~;&eASIm#I&cWEp>e?a}WknyqW%&LeI76^9twIo8Gfwd8tN4ZvJKA{{ zQQ{e|&}y*4y?Mb6GdU|r6#@TSvi9EB!DW&yKOzj}$0h!j;b9kN(SLq9Oz}+bqvHK# z+#70gMSRr>nOe&<%lu7^Bb}x)r86H`s?1;^n|(Y(#;+h-AIeb11#Ge(4vx-_s3K=% z%FTJRtS{%#-??8i&orE&z=n^Hj>s^&@pR^Hdp 
zgZtfen#VW6{O!+a=XX>CVV=hh5G5qUFchVdM#VQskgyZ%Zh62$_8UT*iTM<@NQ71s zo6FzYfPVZA@pno)(o{5p785(N>A<4=I}Naz`8#V8IPc!|D%UT+E>csZy-J|T#fx8uGGyYte?F0ALoGZw@-ya*G4E;zB2rWHN_5Wr^P!mACN0S<<*k-mEx-08NGV7Po zTDpk6XLHH#ufRgfgMSIv`9Uc5==I%C@01kA#l{tRP&4xG|3IgT?x2~(Z(JBkl>JSo zK+Qz;g?5{YoVp5^0^d`bCWf^$>4C@x+CAyB9K1o|!TW>CgAY~E*YUMNtwdd@gJXv} z@b+~ac-{HW;AeNwh-wqEZ;6YK_a)lZgG<;&^QH53%u0J{ z)0lKWd`V-HO33F|t(HCU+m@dp7Yv%UYt(EMXq51nI_g~)9HU>KS`S{ocNluay`ItU z`@4m36!ZSuzQp$N-#ow8SP;bHBz9QkSY4Pd#1+I&BpW0igmfg2h}DV6xvb0#U+~i< zBz5cjnEu)D{gSQ23db|buK$PV(c3SW=jI&a4CB(~Y8DmUYjpzzM)CHE_7Qt*OOi_$ zbT#i|JI!Ms(`~*`NOHpBBK#|yC|u*ToBrMPg|aIBklWCkyp$Kr$;XLFDH4e?J)?=F&DWx33hO=+Jm%%G=w2LcvEcOCSIt;%#Xgw^BEw+(U?Ut4c{3F?|-mS;z zSwvG*t6pEct*u((UCdsh@v2(WbxLzQtUSp^)y%Oy-f+{1v0T^Mv+X!Ltx@;Gj90y6 z{pzvgvCak61>c3|eWzeVaOZu_`(vc$(QRb_?GZ(+`Ytc z3g-vT9Ayk8r?^dBzs@)lDw8u4b}(D8Bz78qh5cI5Yp$PzLC;8BOvha{6vuSNdcL>@ zZ(>vMdTsBnEZEMZPM_Mmv>BUT-(J}MHk~p(=f>j+7aVn;-ZyrO*iYS=SUl_Lo|0R6 zxihkEy_B}CyX8I9-a_AN7Tu?%A7C4@^K?$UDdVc)a{uc1wDH(=uWxT|`^M|t6?`G2 zhngH+6LlBLg;s%H4jsC~4XZ-eLfZ?l2p~t*M!y$O4HGuvFaI{dKVB-PBW_H2Aui-_ z$-~amF7~xQq<^q~l;x@{r>v_CeL;OeYr%P81~(mdh$=z?k1CrgL1K0}+5T+$mCcFt zPBgn@dRUeO?emwCf*)Ky#7l5Tib;i&RvHdB5>64yyfAti@~7=fNF#+A1vl^Zy!ql< z&A`j-j5h4Z402yO|7aH0*PcIuTccWO-@C?=#ypUD*2&ko6k{1HocAv8sp1<7cB|iE ztOP3ZD!&Tl-{e#*S3G+oX!Nt~3-J_ducJ<94aw40J3)jn_jhBl?RP)%o$2=Jw&m$_ z@^fqzZ@NPNjO==_)ewyk&3N7S<-Ey7XqKt;KgPsiA`t4v?9fR+6JSJGN~q^ENUJez z;5oK_T;!gJp4j{*ywbX2)wA9$-#PP9s#mU$A2E=yEUloxmZ8Q|D8KRaS#*`+aY}=Q zL-kYHDv5}W_U@E@tofj;tRJF#p^L9Y4Mbml&wt#hdi-ePq3m5|1?S{CyNz#IyIFI2 z3qz+vuky4NGT5&cFptB+*`t$)*tJcy`kthZu<{!oSMd(>@;Y88Zt}UFO`ja7tZ)*f zJnUogxs+>VcBY-AEsse#xLPgM_hZIkHrHKivg`S=lN&I2VDiP}kBL6P9zoiT^J(eB z=Hq9*I@5KnmYsFEt+_F|-0`T-rmA#o-3RvD86`{1H9KEv)$Lz>eqYr4(KD`8UtYIF zb4CwWcc7xpv}cf0pHbrDSdrLgmzOE(%*EF=>Sf1Wb@R?gn?0imB{$`w4GW`fA>mk@ zE}VZ-LQ?K>3C^T8oc*>HY96s+olvRzGo{`+dThNnr7-F-HfqB?)0A_+M0MDCo3MIm zzqdb4{)p*_#Ff?c;}%-|!+QP8^y`aoL1oWZ$LD`b=FI9#J%o*X-`p8wsr*#0JUl)d zH#zKLuI^7`jbxd^4m2Ndi4ube7Y6Av}V_KKGxwdty+1zA^G zD`T@2tI|L4IUhTnuH&1Hn?0+o>$!N+P2a4Qi9^LVw1}Q;auTozNUM{$F)-j z;%N(M3Ho8)1A8jx<<&*YijBfW`n(O3Cw!+zzqVOt$ZU;m&u5uD%r0#|j(xC=m`iS!@@^hW7#pMi)9LoP1O*K}ISHd> zssyE96=I+hbP@b@!!cbJ=V(GLu*LVs# zqd2=#5E4h5)zuf)n(wxXpAL_m^kSl@u%pZwdwVxpUYs{s2X)S(&5s)&ovA#2?#4Af z|LM6#V&J#$iu<-x5`LSzy>nYXNETh!*)DL`H>X7X?KGMdrDvnj7SA8hYvj ziP4sfmU6jXUNaafE(BEqf*%88^$#gPEGBKM9UlJn3N{mA{ZxRUa`S^b+=T|vh_b~j zMSokaxp%t7SlT{eV&-Z}?|hmdoSMG9@%iL>WJr5q#FOAMJ^39B0vADn;`pJ!5Geoa zk0LMN%ONt))S&vs?}uy4#q7SL5*UB{#Wl85zax~yY7}HOUiO`fjS>z^6*HJ^ zaNpl3D(&*UKA^}CA$8@gcUq_FPGIJ~S!BY5y2ZDAHEPfCINB)pJhACx+f80{o%61K zs#zwYUaGfsccilP>|&?6UW_2l&u^A?ZMnH*Kqy34B_-vsViH}RO*SoZEVPGk<@ z()U+dYG{=kzb}4O`DD>V6#)*}-vF-T*uxnU6@^gEdx1e$Z+!sc?^}Hm={fnvbqJPY zoxucir1dAUsS%}9O#dr8vsgI%9-n?uOVHWVPvqB!hbB9-jiPV5<~lN2q91p`^xXeE zbl)hFq!>qG(dPVZDUpDjeF3%1wMuim8_)L1&Z$7^) zq9ft9;h5uh`OAP$_hMIf2?c6HUFN*0{#d`MVQN^60=e@l>dYmZB5Xtc1spgOXBXz5 z;d3tI6gJ%8?x?u7N&K_%cx$TrN8mm0w`z(N@YZ{3!%4W~-wfrQ<&0q2FgKlMoe3GX!oP{+&eAf~F5@{WH0l-YrPCb= zyr$)xEYhhhPnsj*cPXxTGt^esN)OYK5LspE{>Vr_Rc-6~QIFvc`1Y0vm}EFHt%i<$ z-~_@z>MRNaEC&s8gC%D(Hoh0+8UJ0hGln*N0M}-I=DH(`0=B3qqMNMeeKbP%LPy;u z>suYUuXR*Q3Czj$xZI>W%mai!3R~#3<=v0bRgIe&hwf)tXlmc<)|?sXxz|mO-v2F# zZMNE=beL=M&ib^?=d@#3^Fu8)jl41!eRC{P9J%r=LUS_)d5RfKJ;|K05}}Wh1b&4QBxFlPqP3d!>^yj?sU-XH$wL+j z0>r!;bd-Ht?JSxEK&{^pe9beVM@7Msb2-qj9)5Yc6sc0@^ybZQL3I6tZV?hGlI?Xr z^&9gw6mT($F@$Fk(VR^AJ>K^3wrCfWOp41;D*WDk@ zA!pF@+~RaBzL)Yd7&}2x=&aRi?50q8E|Jy9YmV1W2(pDQpT;*5Pj0TprR(OlUAyX) z*?e>Dxr8z38gK_3-7)NNCuqCsnNCgFmWhN2_)Q~B=;~0l_Ud@*+o8|49&~o}#m}5I 
z9)ZF1(gyj5Ta+mNzd>{>E6q!crljeg4K?fdw!=mR5^j#dHpZgroi~PA6DF&y8JQb) z#Uv+q(EnRB0_ZYRcAUliY@Q$hVBv%i_|N%V^th4sePq;Vcpf6t7~wOt;a6!hQ>Rj( zM5$F~0Eu;BbvG`6L1*a<pjs1!x!wutds4bz%}p1l0r;Rm_%J11|&da|O> zd@TxmHaBVQg>QyNlVUPQXMxAc?<5Y=k+j_!`z0^_T5hXx1iR^Pc*g`|LuyX`QA)?t z7bn=lm-E_6Whpk=)iy`&{%X45Gt`!&#S26g+TxVhFgD6eavess0>s(=pKcn01Y~h| zp5PY*8&0c=$CYrh%y#0(Rq87p({)YTtE%=obKrj;^b{Ps9LCysrPtX$-G(H`OEqRZ z0zJ1+r610ZHbW%|%U2qQSrk=6A3gIjGEXDv4?!e9$q(xCzB(G~`Py4}v_90H(3iUG z``<#1fI%nG2aj^%pJ{*KW23wcAUedGpm415Ri%Z@)@0=prP%Kcr;3GimAcc#&_DCv z-#RRJL??=7Q^9MZ{hCgtweb5fz5$3!cI^dq<6DzI=>%1o@6EN}Nug+R(@9Cr>@vq; zo`(p{`@@|%VJtLn)2mpY6X8|gzZsE67r6aZpS8Z_{hh8=b=jJ9PWL+}AA9d{6+Zr~ z(2980?8iF{)4i6P+5_RLl<5wD3aQ|rSg-X&di%Z;ApvTDp2g?Vt#&2ZGD{zqbNqu} zP@J0h$6UFDNm-_M{wT0%IT7xyS?`k>Hn8jvq9290dMRAiUj4Fh43yp%#~%TvUi8j5 z_6e$#>Ioj`mcB{Uk-s`WI&g1Potks37g&kWJm36gLq1CpO1*tL8riXWqM#sj`^c_@cqv1nfkX~ih2F;HJT;rNP(EPn%~XukTBBC+-ybn;+awA7v1`<@vlbI zT<YJ5!PT?G^tgJD=rt+Qwbb9kMmB(JJ%4))p zBohvW(Bt<K+c`57_WQ zg-7oB(U`uqV0w7j~GAELD`d3yBkhyf3a1a;+i!23C)98@O$#ue#X@P ziQSU#tbS%#FSpIK=9k9fS1X;DqV|~oB9KT2>=BcWxtQ8#2nDE6ej)W()g_Q7|EoRL%(LEyduy=80bSMPhQ%(3SaJut6>Hm z>8{)e9SGk~kd3C2$T9AWS$OpG>Oa^3681RIv@hLS;QA4RQp-;oB{Goa+z`uR4)1d( zqt(nRLmRmW&gpt^_g;kLnRLh3mdN*`L+vRI8g~~}YTrJ2spwfZ_421O4{M?5bSQI^ zx#vK8I0?aFPJzbrzz?)ME9L-+2E3;zX@$=$QAyeFSDNFdaPEUbc(7RV2xq!yZ5>1ON@L9S7$P( zr4=K60Kl?s>dXV*(LDD2Bkz%MyVej~p|hDWKJ)*G6_E-I%_|CBf)QyE8YGtNfdbAR z-t?*JzD*d~vgXf5cC%h-Sy@?u46KbH6qtZ?rt{{g!{2dMtp>NR)Ri|YDYna_+P0Nf zt!Y74RcNSM{@074%*%j9eQi|GTF31@0@YmR`tt1O;{Z=e9DHb_$5D*$F(&F|Cm!fd2$iH|I2RP{C zAGE5ZND}@VqhP@9{QcM8J7udcHzQL7bGy^kY=$3e;%x*jc0^NuH>C$8OmZGTM{F>K z&-k$KPUA_J=T4*9Bg*2FjX(lDhUV*Y%jxB8LqMK+pc~#a{ZKDAc4r_9D00npSyilno*0@dFRzDD;MsG{0l zX*yE6>3aYMBK?|TlR4fPw#n?pQtfl{585fB=RSQPas4{;`R!sGd;^xAGa=LDJ5E1p zFrfCRu^$D%(&XU{2fh4*CEDD|%+mkl5nU$9o$Kq9=5}q&jYz1!POXDXI5DU3#qpLh zLmCwlD2){myu7>zn6LFl!YzR|h`YYJAfXn}Jvg13yEe^|H6K`Nk34C-3Bn?wqgP1X z9pQ1?HE{IWUzR~Kb>7)p$9%7o8P{_61Jfif>sQ3$B6H#YXMYv^%R!FR6SP zEgYLT94q$vU8GHO`tCm^RtGxCJcxDYHl!|yOcDVQ2nIoqLl%;xX_jOAxb|C$0!Z_n z`;uF`Q3&x-qqH9 zv!puf)mAjM)H2Y{34gj1E;h8PZPNACz^r{P1Z?Ab*z~Gb^j|-3JVae8r63yt?TX7}Tsc zH5wFTK2bH}LqbS(!|B$v6_GrhsZ+0ha2TflWvL^mM6C&aP5k^$Ylu7O_v0K_dpnIH z1?eJ%uWKy%hW2vhws1lyAe^6-uqpnHW*P7U3SU)z#-%OVdrP(~5%e2ltwCv`+~m*h z!p|hLuCgA@JbGiBLB7dz32rgVr{$!+FacZHe~-#7+%KXB5UTG% zD5abN=VC1@FhED?BL((DdU7!p1lkF=KR#BKe4_EiN{1XRpSU(vR)pY)UNQYQ#SITv z=8dr;>8~=~5x#+3a!Q#2>dY{D`Q+uP%Iuv05&STqD<94L$=(SA_Ca^7q7V^_?IBH` zF-R^#XpwqJjBWGPsBxO0XVsc89>`ZKsJ_oxE}%OKPL2hl z4`VSo-PFH%QrdKGk(g`8kKE^9e7g_aNino?A3T!#YrhqhHof9#_`(^PM}B7xFvq{* z^9r|G`1V3biHt2uBM2St(zpqezWvG|CJ_zfGyTD#td6wj>+x22~vos!UiG zd@}s?VM4BqM^^Q#p~QevZ72bAMD~N(f>&4`#d9~sTHl~gyysgWT6Io^wO_ve9s8n7 z!oo}d7IliNWVCy=!}7F>C;+hdis55Ru>ty0j6RdH@Yzyf|BlNX*~Xr#VAmRxy2>GZO)T6)y^QaM zPnpC@9 zem~e-YEMIOv5Eb=nRyW?bK_x*z`<{J$k<=f0z9NhXF^29#8A2KFWWcJMX*dW>NV8t zt?X`=Ht)B$>_w8^q5iGg1Tkg6#DZg?FQ-1CF**^Rh^Nf5vq#X$kxQdIf)a?8U?a&i)bG(~_A$5=U@_*=u`dV#5%-^pKfE z`@^*X%to4YOo=0cyR~o z&0wj%@KpDEKafK87&NrBouGV?&Q&*VRZSVU_(Af%0^986`apJYJ<0to1P&CU>(~Hz zy)m%1v}P+`|3oVUn@SG9^tfT~6QDky8Z=^DxBt`i(PXO1di3$F_N$2(ad0TNbQWg? 
zFi{IToH@L{*y#q!dR3a%%DP6Q+*sz#R8tVPZ3mrMHvG69y?N|6o&mg7 zu}vMw<^zHlMp!TL_K69AiD*P-io5I9BrV9T$mjN~&wd_%Gu%i|2jB-dqQ?`Z9Ubsx zErKv7O4aZaX`B!`fA@nG8DJS?YoVF|H2D`aF)_^DFpp9d%7r1Eac8?fvHwihIvx`| z!T_hEfv@yd@Q8G0dD&KN`9|+Zc*poE3VFS6;r0}Nyxb%-j!}c@IsgGd;)D}SA(e-c zS}NkfOv`eGvGz~FqkU%}9yRr$KD8WOZFigYEVZjaV2g&E|Qol-wpz1n>Dzr%B z$M|%2AtojU59yF1aV(%k@MM}A@aF^|wtr>uZGdMjN{fu~0TgBcQh?vS}oFNEsk>^wjsG=v+U$9+F1Wkj4CocVWH1i7F^o6ECT z6;Z%X=99$NCf+X8KF6b#4e^P&{pwi1APonP#4xx0Y=Dm_&?Mac_dU4Ok{$BgGjUp6 zUqDgiqQ@~8{kdXDO%B+K0~F~=1H~>Ee4h-;)KEw2UoujEv0eWs#kT2dWf&Zwms_x3 z@6Dgi*mnNA*^glY3iF`f6Awx*(Oqz3xJU*jE8ZA2t#*BsJJiX{S(-!kK!`ca4bBsZ zd5aCc2Mm^*$jy1SK4DZMN(I#vG4BMed!KMy%fG-ljJH9PW;tHl=4(;NYNrYRPeJbi zV4rU7gDaCp|CIiAS1}^B4u77D|A`>sW{MwCAwWh(1GmhDqDbYsJw9gRwL1XBQ4J&t zpyMe;s&7nu_?IgjHs;h@fp(N_YgYkN&_S4xx%Zj zhJncBZBwBl259yeI!W*K_iOtLIH%zDU~VW;pUd|2gxV*)oI@Q`Zz-%s^$K(4*k<60 zs{n&zeWWPi?_BfTv@0OylCnBoKu04CGy%MWdyi#={{={_Z?hl6TsJ$3zC4O^Vh`fX9*^@2)W(44Xh&aQyXCd;G*tVIQ5+y zsP4trr5<0A#sQ9A=e?4M2%Iuuf6#F1CB#2aDEHN-q=XsDAfn9NxB4*2_|$wZ_oVO* zPd;ksMFS3a@%)}s2J;4A+L(fMfo;G*(je@!4v z%@?3Y%u_)bHNM1w-b<}CwHk=#w3tXE{nk{q;>K91ty=j7O`3L%Sx#{ISkv4?4V~9v z$4;y4|5EK#A@5f};QU40MOu-ph|H^KWPfj1HJ#k|`fN2OF7AO-&$XjgoevH}I4Lh% zKW#4(GwlQ*x=UqvOID$<46vmcxYb;fF47nzZ9h9%_)C}Hb+D>Ef;RYuUPG`Pv@a2= z+{0Wr4{^bD%f`N|UwrLSpZkS8)NE7I-?v~)BOw;o-h{@PRcjfi=5XfQSf7o{WT zViBDsrn!{2wMmTz@R*a>%_0_zT|=e#x#-xybEB!FK-2KCc2#i!I?+FS!0Qjci^L`g zz&JFJmazu5&DDj|b$hh2tQJ7RON{wknrmx682GsjHf(1Lw6z1!IA&lxPnq68J`8}h z#_`_?YC=ybGTw*aAVhKc_uc@tWDJ;Kx$Dj>VjJK%Vjc}BY$8sJIOO$tI_m>0IV6tS zk8n()-JYj5hPki>5QT$fuf?lBhv7Q=%UzHN$p!U15S0d0?@NFZW2m0lmr4d0T|3GB>q zm;#FQ$xB&q@QLpWbZ(ns{7_;#jYh*ahA0&L+ruzODXtPWi-xZAL$n`ymTrfEe)lji zNw{8koNRB2^(cYx*xiAo*-lnkCREtWOk!>DsG!e~<_EF5O*tALP;$U@q*^9(iZTFl zI;8n9AW`8TZeS=1Y>7@aqoWc-WqN1iFX11N^pqC#0aYr@2b65XwS2((1dqp!*v$Jg z6CW!`AhV2(I`fl5B)MR+)+1yY!3iORV7{XciU|M&fd{W2!g70Jx)=rYC}C}jLwrau zAjc+ns)<7ZDR>CTg$N&%56yP$OWK+z=+nxtf7!qNtrvzS8q53l@2!r*ZB>ZXXdPYXGW6)@%e|j`t!7@ytPsZLO`QP&NO& zVY0o<<909uCrad#Sl}{{z6~i*yhSL?P`rgrB~;$Jw?HC1^x|-xdZ+nDh(s}Uz*rgs zqKMb;Is$UiYU_JfEY{mWu-711Z@M5OF!ifSo`pWcJOQ38@A_D&eh>;TAU)#=w=)62 zDiVU&)~(Yev6+0>saM6W`1U5S)A{p#q|Erwamo9kCTQa43jvy_1_i>8H8i8-Rps3y z^?kEgKZOtbCzL6>HCW(0_#jvbNYS?_p7|n)tsIO-BznGakZpc0_ye>#sD#4BOOUY$ zpvl}((^aKKo}nGozqI-K6$sf=0$r!*t=901jzgp{2L|Shive;Bpd+~jzzUMhHHB>$ zRsesyX=j1_0fUlA1Sv0`4eK;EWD37#zl+gi;YVOq0zQNihsnRm`vPgZ65ReZd^}F% z=K4y)clDneM3PK>yFKSku+OfUS{X)qB6sm4DAjqN|V33{^Grss3w$)QN2oK!+YJp2vM^IMk2 zkDmwxB1X79n0}bhdePpk-t6OD2>J?PB;37wb(u)kBLwBU|BFBDV)9`)$XMUEeAuPy z=bNPofP1AZnPqsZqunPpk2tEf=pm7C@JDuw%gqO>>Vuj~s1?LOLrrU-Bn|A0{%%%@};NVbI6FIpQkTsa-} zs(u=j8nZ)da1qbcezt%EMeS)JZr#MXZWkqpN>*-!06cjmbhaFOOWmP(yS|S`KKKI0 zYU)tqv_2@r>}j!StyKp>BDE=TUPJ4?ic!ay-+a<<_Sx^Cj!YzO ziy&jvt#e{+2Wk9}vnJH3D}}qXU;zV}B`C0RsTNc!5cQ<-_%%l?y;Sy;hG}Jj_lrDtfgt zJmj%ZoDF;rdbrJ|aA+tgMS-0d-4>ha+ej)7?%lc0HTa4Z-cgwavvl{i2cKr20BZF| zDU@0mrXxN-cXM5x<2zR`$@{?HFBoGj3;i~mXE0u52E)q~!GgrJ*+3Uw54LX(&NUMdP^`~gIczuEyC2E{8&H}cj)lDvc883OJqO;^X- z*Z0wK+EBUi`%kU|Q12N7r|lALdb)pY1$bv|mP2{r^v+Um03YOq-6!LtLB0DRDoY}i zPs$u!COIyy)szRlk|0a0#S)32BrnCuO81`Y!*l3aj5}~@7lKFzGZYSVf6l6Z(c$f> zJ=DJ(gIz`rt=?O`0Fy~WgrhHkC`(=Jt=GUJ)cB&~KI^W4yd-w`5Z%p@Gm#&E>Iz>D zphCClGA-l4!q`)u%f>%_bI-)crY0YUqtbjp?9sDVG&MQZza0H=a6%sY8=y~~`$$tf z(FjjF)IsW6#S9_IZNSevK*ywK?_|!G8R(0?c*>CbIi)p=ca78tRS{|{7SMCooix{3 zne6Nv+m;(1dAcT%mS3>T49^=Qf}$`BoLx)^*%Bw{DY2htF#vqffr9=wcaWz`Ql=wQ zr!ei9#3|@lj)#qvV9ELF{Q*dfA1VXz^p<1s!}`@Xb_!0?G2Rr1I{K0}n7^5eBZ%$@U+c z=`-3G$VPdnUnLW$SPJ;j;unE?3as4Is&Kyd+?JzHI{|5IILh|{hlU0_Y}{RrRXi7% 
z(fahfGB&^cnpy5B?*w=)DX5Q*qB3O~PjXLmOdIXQl@$dqIW2~zA2I8t=JhIdAxb^r zr^2q+?Vl_X)67_h`8u%wDRX7_KrGf5VgHl>4D%bk_INNs5f9kegqE6AXWqift7@BP zz<}T|vS<^Fun>9|=tmDPVD|?NQXIN5{F3mPJp|6*IUmmS5&(iatMlwJl~xnwl--A= zzE^SG+v)?4#7YA7!)xN0_^U#ENy(WFTyAPeZ9$Cu(Pvs z#p;}I$mCB^DBNjtw8uOa*eHPag}yiQowBY@A{*cE&IaVassDGhns$cK?Fv9t!(mPd z&;H%M=29bY!)3n*ph`40Mg5@844Sfl)p{IRs7C$bX;7&}&R&TagLVl;FN~@?ZV-WU zAHsi3AQmmN>IyEc=@J}4djGcl)v^xbc$MtI@(g%0LoY><$43n-7MlC;knwj=j9U?z zBaqv#OZA)ehD8F93{eDQ4do<=#0F<;{LSD@X%g+GM6yt1x<+LB#8IN%@n0>vok8)V z7T4~d*~CNztL-^CH11%qEPUnR6SXK%@VTiTfT2(V2JTxbI!CYY3n;JE8rzbM^{M4SOEm+vDA zKs%y9weXe1tV$wNBH+;LgL+Y8dY~jgvl@>F6m%l}q651B9Bb`&5DrP? z1xV0wkkMKFW@uA01{!F9i-+}zv_B3HN(GTTgui2J=>a`N;Pw8hd0L`Y7}Lj?9K^8mTVSZI14O$@&qJ@Vy zY&2B2r)&QJxB7(4k|9vZ=gx}c^}o|%Wsm)ly|E}t@#Rm!qHj;T_w_Aeu@kS310shOv+>j_-e{jRpiMef_0;{iQjcv6Yk%<-w~ZA+onZ zYjAWyr;ut+deJJ^gNyup4(;SH| zaR23H<6vnEVb+l$3(#{n0X=9*9?Ks?J~33d0P3Bg=S?3@1fs^KP_7!KTITG2-i%X4 z5AbfjdMbRd+__Y=4H!z3(f!*8BHWJSQWI{*kf1}?F(ZS9}<8Vr2) zpN;6`ZY{0oqX}FXP(2!@tmha%$|OH6P>-AqIXH2{!Rmo=!fZj4|2n=Jcp7oTW!*1Y zYrZuO+`VN}i;8|eFE6K1Fu;wR$0%4m0+^=WmB37KDZk7CvjO^qh}VIJ1G${>WD%fM zo@cZCPT}qwb)X`JO-qWbP79x}Q9sr!%Vb?76!NN3R>t~Mar3aS_R6lNaL)itvr2?} za)N0{CV6`oR;Cx!5A<)J71grvGq~qG#iQbOqQs8MA2^lLo1-O5R35*J)y8`sB^>9f zkl>Ct`!=hU>di>vO{m6!Mo13s(?@rVyoCi%=id%C``&OkAN&%8727d@c6$>Br63(r z+8_@E44PUH-h4-=2=UPQ=PT|)u#P+cj^&3~lJII*TF`=aXjQlX82$oyLJb8)hbu^& z*xMi6j@EcT;@__h%7`CI#P*K#Cb8331;7fPHSWy=$zsaIH>`x-dx_kgcjI#9nucLrpU zLq?Dhy5SrtkNY)CUJzh44VXAC67h$&-FZRN<>*O-M5f<~tm0f&r2e85A8=Z}yfb)m`(nmoTo z0#{nA*zK=G@ioCn#U&v9$SJ8`!zjEO{dlI-*)DaZbsJB#s;r1+kITPDY9r=3p`fVE zc|R$lO!L^l-qMUa5J#AtZuBWF=qb>q!3F+M4Cnw=6uX%Me#GCZDI5GFs~1Rj2+7p?o`5nw4ZLxr zv!UGgDW?UtFg?7Z!M<(?TUc@u$KB;w(*ix)g)Vu#zUL+$Bh%0HN zQ}g2653`fyQ+O|kMUXkCW-9s2^;-10oh zqLo~B?&&KwWhP`a!1VW=#MQ+(c=_X6(iH8<(d1ifAqJ#wdQ@A3oV9ErhdD(AveX`% z-^pQZ^p8Bw-6K-gXa0rD6OLgXSL?uhvfXMg{D6g=1^#+ohV*PA0ZicQCQ-y5HAn}@)+u;O&5#ei4xk z0C#AT9)QDpq%|&bUjb>Umw|zw?V8K|ZWCbVh(jD`tkjF0&Iid49|P)6gm6fD;#C!qM>itcl5{zN_(CC zNw87Jo%uo2|3#NSNyytHG57iEF@^6XD<7&u0N2Q-QYf%fPVa!m+rus+y?_z#BcY3kyxkR=f=45 zPG;YYJ?nZGV~r+F$nw_0QJ~D+dCJM<1HAI}P;~PO`V23XS+4_m0V?$QpGZlp2jeIY zIhNUrHz{=yB{svaL5{qGd7v0zR-@&HTwgn-l>u|lv_y%UiU0e(l|1>B-;x!XaWwe1 zB`Ak7lVs0XocUHssA8;nTmga4;Ct4szYIppLhFc?*{4;-1&`1UzywVv5b@bx;15Mw zwqYPUn=)NuIL-T8{%&ae#GJvc_G0C=V1sGSf!SlP)l8`PMKc z8oVeq7O(Gn#hERa`c?!?aEUE|=9Ja-Yc+hw%ZLIc061z%Q}L0-|kQ->ai6eifFqW z@vlOhKD?gC#bgrVxBuHJE`wHij?>1l46p~?W;u~&#saRij0{F03TN22n47Q8Oz9bu zCyobn6K>_Xpy%6?0t7lA(CoFKagzh)Y+p;fa3mJp^3(&q6W+$0O#b@&2Ao|yNZP&n zo9l}|V505)=SgMOY1PuZ7TyInz_kQ#+XsH#3??w8^GmyvH9=eqK36l8EhxFAQPUwy95%{9c&nCLUF7}KV6eE;kRNeaSi zLi{`o#^f2i&kv&kl+;VJt0B93FItH8Z9t!mBl#UMBKA4C&*^I-He>02M|1_Vc)%7l zY!mH0E)c@7a;nVq@gH>O zoAa?|( zaP<-22t8^|anOwX-AcDWPYwT=B_P`J3ZZ0T^e|$zqR0BF}`{LPpiA2SlPjNXN#zf2Ks<$9p#s{#6c@y$0^J6v&09#mj| zeTV*88scrV!obsQGg@08%4hFw(L!b+7k&TdmyAp9HlU%%mGEE>Nz0Ap5tH8KAtc*{ zQ>sU^oZ#)k;!0|C5G&W)N!)3ZkOUOI2^_{doo?&WjeE42mkVpl5catoK$)@ub9ac|AOAG4H!Rr z(T@1~CVG50YpL4zYE$3Thnut+Orqttj-Ou-oF%GS>n-9w><4&{;!RudYq4b)5D*)fQ^V=sZ+%@iS93S+E}s@a zO0XM6DrZanDj_RC4)cl>z=p>Ih1Sl7D{S?eeXb6e?a-jgbi3f|WPn2-u6zj=kP9k= z@)uUNU(9+9_&L>5`I=4uQK4^ZxF=f6Zr{5uWFqz1LprTl>DwEwYqom~dVNH~$&`oCTgAMAO#) zANa&Xq%JR7J6&=Ou&WLN9fl0zmnsQ7_kF=&BRKD?L4!UoDS=_Gy(9jm=h$m4?0%@7 ziX$#+^0(H76sKQZ?<2v-JLPAAC3^sZ)=u3&7`5`DC$O4`R>}qK{O^bWojL@+Y_Q(Y z&)WzSx&h9oTw44-&w>Bue_xOuIy874&H#Hx)cpY-+ev|kt)~Ll`c`=j5#a-kCf-bR-6TuvASI! 
[GIT binary patch payload for the PNG documentation images omitted]
zFY696EQdy{wQ$zm0HGRko*<)eoi=hrXV5h`EqY+NcJ0Dy?A(I)gvpxwCZ*fa{mU5; z$vD$lzZtZ!;$*g?uo%q=8#KVz)n8cq;1L(Sp);EHi2NMX(#a0vzdh`9a>Pyy<-tR| zU{iXZ7hW_~T5BzADm6p-c0Z8YcFtDfW|B#!EI-vcE%m^f#i3TvLV-6gn&C}so8TYH z?F#m2^RVM6T2{2Ro}>*+iny(E1LJFck1V8e8A#LE|PQ24A}VH%t1TyW^V0 z&v7**DTXDOxFOtgpQ;QU;<1^_9o{&uO3MrsBKT7#{)lP2|VmlRYc%+mEFO7gDHg&HvB{18i#-OTW#T2Cmdqmph>PziYVd!f@2< z%;x^4uo2~UusW+hC%rpIsV4B?e8QY_c}ZB=Kj~i!EV5-@pbHR@XLj2T)p`y72IW_nS|BMy8f8=e}B!9 z4y;hg<7g)>z{lIZ4El_?{6`nOPZX{c1#fn%aM2*Vnh8v<0y<^|Vkr>$cfSCdEWwU! z7hvsuATJ_5fI^1D0l@rzbHVrS0Fr1K#r_oUO?OWjx7E$68>@+Mx}2hE3_dq9ol z)9hmosY`xPQxaTjaNc?$&|zgqPQzeD%tF8AEjmT_1St_8p3zff-tAYmV!Ii~op$EX zeNk~1a3-qbVR+Ddflad%&&4WdcS{_C#`XZQz;pn?casZX)PCZQHk!B`6)PptBZE3W z=BZJjXxo~pU}|8x)cS8e{)WhscleM6rxsRT!f%&Y**pW;w-hX=XrovX!x6+Eyx}b7 z5XAda2blHdu}mqZq&WC+ zg#4Dz&UkUiG5Z-PGLDdMROIUB%P`z?fPm8{94I#le7=oe-UqY2_q`Q=>MR^(7qVRj zz-Hp{Btwk5=O;2#*kSBHTe|2VTgM8Om;LE50dyH=*t?|tcDu@3=HitrF3kBrj1Q2# z4xj9%XXnw5Cf;BA9Y58A7Q3;S1jLK?1`ESng1Kp*-W`ncIK|XwZGThph46~(MNg7Y zRLdEitHU;smeoI3W3geDj*g z*!80wa(FQ8RodndFB~&O?&+f+zr`J(Jk~xVF4Q@*Lc1(k+h!qM-C;!xV(=~n0;#R_ z-p)wxdW~R)^>2wcOLODDoXlr30Cce#x?Ka@yelhhG8Nn;cV3d50jSOUYhgXZ2MpIE#W$gn!1amdy(f!D*?c?&F?S35tqJZ(v-x4V`zY2 zO%L(3e+yAf8vBzi0PjWE&lJ+I)^ez+EnII9To}dDo&9r`HaSvKrNq*awN$8=Y*-FzX&dHm3M*z;d6H#k zZuGDX7#<9)EWO@S*cysrL^d3CW$r2R@8MrQhWK7M{Ed!%?Uf~QX_=Qw2pufibc6nG zJ{^@4ZMIL@1tl2R^SLFl1|gu-D&cMG=;X_Uu`AC0HP3a`f8NQW49_9rEt~_og+&v& zpB6Ldm-KZkF}Ok?L`HC2syC~7ZOzT43kzHaHMyU}59VJ5GaN9;%*Hb@MQ`ZdH-;z- z0M|_)4`#-b;pZ`z5Or{n-Jv)K+5M1dlka#%!3q&~YC}M)7Y#&3p<)@vAN6W4f;1zP z2itMwe9L5gOk*RAi(FIph)5p?V~hcSZJ#cDyQnN)sJ#a?h@b*=;>y+C4~En)NUKQ0 zIR56a@2JYjpGP2s9SDRAbqYLxifIO%capb$Jz1B9ASJ}x%C@R2p^)T;Yfai$qdB#S zU(q+WGcIX#skN5>0}U8Y3Q%D5Px_Fbc-MP0veLs>h5(Tacq=CiPd% znfg^GbfloX6|!U^9gWz_NBnKn8%ZO^hEi&ZDFWu)HeSW)4$^T&Gvn?0aHOK^k(C}~X zcD#NfY4}|Bd~(?XX5LZfAjLsS{A<4Mf5VJ6P4W|C$FB`q*gVOa_38+3x1J57YwpD6uLtZu{$jI_f78f+FdH(5R5wUs)sb7*1D6b) z>qGEZLI^)mfY;iHEn{bkQ4dlNQt%K`J>*lHApGRrhLg2{BSmzhpH>{YWzT;q=QCE@ zU0oa$UE8vSjWs-Z`D=#TdWpRqM0FoJ%(9%m(?c5Xv z(cTNot`s`JzrFixYo|k+S3N4wrqwhf?maE%9r4|L>^txp6}N*0I8833h&~S_fWHIj zo7Gsuvn$)l^fvCHIOjTKGIN%HG6RSuV%~G!HUc!uB}3SCT+}N%tM{GBvLJMr*<3oS z-!Uo40wr4zE=xH41Dzp|nCz(z7X5NlwXHCUUvxcYwDM)=;j_ztV|Z5y&bTO~LF$ z-ed=CRm?WeyGOU&5cagZDxRQ2lhPvO0ibj?KBK-Z1tEfC%0vblWG94Kw44dT7GbHP zYF#m{o6{!Iv@NUDzD!t=X&&n&#$m>z7w&!>f?D4{H~B5`3?gI?0&4~1?U!;86qCAk z2U9h!@E{lahWY|#9-1EjQ-=aj(JPM^P=PvNlMX9v!VEEpLjH1}iHVAiht!uNa4PJM z<3Y26a#S$$r!YnhP^>rz%fS^dZY<109X3uTxB*^c(bkW|C~4 zfYA=O$dm8_G=0#)zl~y<;H;lmiM%<*>mXv$mBzd!;O}8?Iymf&#!=b=%L@F)pJ(X~ ziwAbwZmB8)g}sGN_d=h!Cj6%Q_#@wW-TZ$8|C$`a?hLzhq4(38(XWW| z2*p!ulxqV|-{A~;8J3@SH!xAkr;|unN9DNaRcTZ@i;n#8`KMqlbE?)uAu_=e<@V!z z=j3EF(pSRk?~TFKXCU!POrFbKzR)dCh`giF>`kcln88+Wp;vgDSamEx(g2U*-yD@= zr3GkJgUT$RprmOl`xl?hRa?HaE+TiES2;IR9y;9`Gi$j4F)k@WexU9?wpzdFbnM1+ z?wsxZKa(b~q7b5JlcASrS7eownw2~*jVs?Fq@aHcE{-oyp?aYA4`!Ehgo)Yt^UIoW z;>6oiQ#FHImO9~|d(r3EA9ZlC^jv4mDt~{h!Dtu#XxT6S9W|C(Z#Txrk>WCG|)$2=KM|xlJ z{*Cc##V(%h0x&Zmj+?}NbOx7q6cP38?~nenrI+`=O&Q}$v(|g4kO;ANzmHXuqJGn9X>y8B}jvsFmr%!i=aJ#!reztfSxJg*cswy2u+}N-Kf`)nC#J zZm;`zxYQo-0j_oas>d%rSqd_S1VeD1NOTkEdI6cYM)ug--dW*lo|4ZA|CA($ao&qnD*aE+|cO71AP zgV_!PMwP}AYKOu$Gp?wiBJSYv`Fc##0jWo#e|j2nH1~pHL3s74A(6j5#0vrX<&u}J zBY?C+lu>Z?bl|A+^+%2O?Rd^w@VN=rTjWDZRHg4&&t5E0PqXV+bjiLwR5g6h>(kQU zGaIyL#54QEGhcYu2p$sB2~!+1e+`6op&YG?mFx<+Nn8=kN!P-hjX<^q!|3Vtzl`lt| zz%ocxY|z6&1;tuXSHwhSx+~_=jQNYJF>~y^ zr}s5KF`J485{h%IxQa%)yzX=8LhKY$IIomo=Af#3BqdLQW`#k4tcq}RMUyBpJ2L8D z^(LLimmN}dst{%vAzGg?TgFQo2Lxr*K4jqOiMqXCwRfu2=enERzCfVjlO9p4(}Wlv z93nj9=08*@(a(F(cW^l2vJL5j|S>s 
zMMF6^vwPz@c~nZrk+B5Z=udi1M+PRXkZtDztd%aq|3FlcJ57%3tml^ebA zlnx?(;1ryFdfi!mJ>qYhL5&@|j8;$%D-8)umSg!Pe%l*OsRVQP=LPDNKr`s^;Ita2 z_XIV3-FBdT16jr!W);Q@d9orX+q>iyCbSKMEah{)OPfs-zY2V~y|j3P?PEyQj7?_m zTfdGMSLWwWh>eqweYJ^a=G}&SuAOUR>OrcM9vaQj;C7%0Bq7}QR*43+z^`!luLaD| z39n{D-s01Iv-qHLVV^P{$y= z2^tnt{vyx#EV+*FAE$2CIJIP?Shu`;^t)xx`Y@dbx88|u{N0f(M%1c)w)&nQAuXtV zo&a@c^^^5>8|g`A2t^hG66$X=S6$=|J{;==5D<;~shBDYFmrKS`0Lq;ZdtytkCAV= zCOwy^rN;vicyWx}SJ2Wr-r%8H18odo?Kb626DkDk*`+q%Bm?6d9RGQhBm;hALV7$> zS=Z!qi(fo0?DtjtUj@lvO%dmnpT8j`#!*KEGrnLi1Mk52lsEnQrKBzljkODjzeo~~ zpR$Wj|0}=HGyzXhoRfDxei!3jVkchuZvk(=mS5}IW+J9d;%nDi*(-I*{1$`bTpCM+O4>s%sjeQO2-8MK)3y?U-^zs+RR=H zO9=GxKD(T-Hkf-fckb8Y%!W2%95hqhckGvLH)zZsnVdV4aTe*|x3=B%<&`|N0I?6f z@y_`p>q-GIalF+GAqCENbkNg~R@B{I)*Gl!B4s!7Gf%CoPrVb+`o|TY0 z;E%%DAa^zK?G>T3q-===Pp~6qq1?27eAE`P_LlYwrw-m_j-`)b&qjN_pAh7n>}&xf zhuh70e$$&hG*53ZH$_cI^P{%P`kV=_h`pHt-Ax{Khb@_(P$~E|1qrGnM9ZTP`c_aE z;erFs$eY%=`*r$eyJty$OKkA$>a*H2 zB5S&@f<_G!Wd@6T)nkg4biZBi<6O2UVMj~Hzu+AuX2)9`IFs^)$lh(=t$L2PdWcMd7-|Z+r->c=Q64oS(jO^(_`qhr){Y!Yy$#SQ0ev^L(Z6`$jP-NamXns}N zibI8DpfhpSol4l8x-`BkLcbEb+F z?U*f{Ju3Kra|Sw9JQ59jzxK{K|NEW#_FUO*8X<&VbL2%zb7N}y+cxpNhvEZL9wtPV zap6xUeGl_9dQk4Fi>BfL{^K=4T{xTvPe@?v#Ap7a7Xg~U1@Yo!>Ji+xbaHY+_R#7K zEYUptT$q>bni43Rv{df87$nFShl!p&--cxPbbi5#I2w6eD({1kIMK<)&aMHm@`Jj` zD>+0K)P)ge6(!(D&A}PQloO6^5RW5UBZ@QUzNJ%2gF>P^uJW>^czx;S6Org4m2=0Y?jkWbUmD&}7+2UvInag1C`YgbypLLEX-5mpk%3-|zCONjU zg><@vdKoq`@ie-l#7Sb@U=E3mxGfMvvYWcG7VT_2kj;O&+IQLUmQQ?!y{-ew%OHQ( zHfN-q*dm-imm?F4TpV(NWeG6`m4wc)S4H`_uooR*dt2J{qw}>-939i%41(!Xo=G~$ znI*AE6z{mY+x#nkcMK>h40}m0o|EJ|UMW&Pf)bUVuEjBei;5YdHl}^m=fkX(pu5Kl z9woSN?S#<<+xaV&!X4z3f#_NCe6-GqJ2=>=t#prIofyM}*Pn+~;o%^^p@>r}5QI4r`M>#|^ zcF#ya%+|CZt5%cnl+l#Di=q|>4L8FJ#r;m12Rxxp1;^FJ?U-xZzj9=uR_qeTe2B^uovsHX2&a_|9$jv;pO*mP1`NMn_&pl-pD|ioAytJ@`8^M>S1u_#NlaBo;89!I=P6&c9f?v1>wXbeXG5va8zr>=1s_$dNtW?PhmmqsQ3=Ct(0~@ zs>nA|sodlC64zCvKAL6c()|y7T8F&Y)Ro+R-l*B#AoBH76VV2 zy7+?_uIKzO1&Q&4al5&Jdx;lI`eSO_GVVK~)-0o*kY@F>J^noDWP@~))IdSJoTC6Q z{)@grU$4T39K8L%M`JQtza6M$p6x|c-4D``gROvxE8SJ!F^4Y)#cu_$j!5(Ia;mc1 zO|9_l<9=MK7JUxYO$z!#q>J9k^q7#OhhmgFKk)?czOY=O@4r;M)uLxX>dl)?j#%v0 z=!?dyXL3B$gjGu3`Qjkh$4ICj-%-OP^dm+b;d0ENU7(#*2Jm++A6G7a?0eOPyJ~U1JCn0F z@@o6@1?Guy)2qa}?HjxV-x70-q?ZuqaC4qWONc zotN&*UqDuGO5(;M41%Ehr%P3arm1Q3i-${{?Y|&26&%#B zxN?>+QJqGC1xTuxBS9OH>hmTTYiKkZCBP$AbjmslNt5z2B!)<1(#2h92<#dtDz_O3 zn`ggse*iw$uZUMt2=Tje#fmBWKzX|@YMQ8DqHn{kwgfTLc8VP^LW(!;)sh{VeV?~x z6!L1?U^na&RnL7-&|sFT6){oK*OxLzeNmHzQcYtvLHQ*S7qGS?J)TrX;r62 zQlr7H&du_SLkbIfJ}An)I`T5Q(Sswh@z~svQSK*K`g|tDozz+|{#?nMx@4Z=i;rP( z=2eH*uQ*GT$9eicXRhiJFm>gC0Aj=cDQI98$auVyfJs~2O@|gt+DZseNCmHVhoqbi z7IuXWq6~XR%6b?FDh`GGV?P~~t%BJ18muTdd0+mWGVn{)UZVg77lq=!ygy9)Sc5w2 z>m_lxZv)N%xF|lb;`vKSe12GDig{+Q3HqBZTxb~WD-xg4&0TtZxtG5rnAtFbfUnJ> zhu8VjM8sC}gwj}3p<3nSN`lhES2|&ih(f=O6^Zr^2X&Lq%KHrR`twJ}9pshhKNdq-B#su9# zZ*`Sg#36_;J&p8h0a#jF;T8-t82AP341E}HQKm==xcJVJ?p|D-) z45NB)2Y`>qo8+G1US@A|%N9yx+-#88*_fKRIJv9Rvl|c=LaBaO$&+UNc`WZ)z*Gwq z6y*{8wkD!Bn_aOab_{h+l=1@%*vO*|oG6zqy35Qy)l1X)E~{T_X`#8XRl(1%PB)A5 zZJ4OnMO~HykHXJr)FZQVE~?i2(}LT=$+#GeoT1(Z zf;UgE@9#+pDk}~#vLNeH_fX6PAA1!k)^k%pv6~;Db78gvMrkl0li~A%VIZ{T&oF3j z`$k4k`Tp_wSd&EXi%P0ij!GQ2H=bv$Odn};&uH`T$4Q3DMf}zlHgN2twFR;P?Xen{ zBC!_0i)>u_H;eyJH^FAjiTxSyo=sm@swaSPa$XYoXD z7$@!>=qdG&+~~K}DXikZZ^Lt+KfPl^^C#6Fj?E z$}QsYucuzjJ4FY2>g0#lU#+K*t9J*|F|iNUSdK}Kuf5}9zhyHI!WgDcZl_!(?NR-U zSd+P(uo>p@dPORjF5>B}W#?Y<;aa((id# zL4st%RP>gf9|~6jGq)v7XCIf+Iv4D9j?1fm4aiOMO!7P~y84m41IBxyCfJ?Ip7QaL;9a0n?rZI`#fmAY9V7>gp2!mmGaFquo!+5=BN*_nAx zT~)8{iJ*_eg?eF&K0#sswqCrVflR6NfJKL?17bWY%jiVqN_xz8H$10~_eu#N|FiH) 
z=u7d@NkCr`PkoW&AbR#fo$VbKQ_h^(sIK=>ca|o)6AClhqPFyU@Tz`(iF)>;t@N^( zcgfRn)pu5eflW`gH5|w&P#NS$D8~7tEP;O%mKRT;n0G>aS#+0Z5|x=9EyM?7!z^ft z`vdk_vX>|^gTsC>_a~hY&TlKIF2c%V=twdsn`Fe&EMJ1eQLn?gI0mbz_MWW+ftLn# zWdytzSQzw|>v}1?kbRKr?AmaiXa#z&6mH5ADqYE+oTXo)ip3p(jT`0b<(cS|{slmT z5BuyHNK|E3EqE>N_qtUqoc0<6)#tMTHL>)N`c_iQw&`JBvZ+9Z!-{}Yq%EIfcYzn5 zgW6g`6~nQrMiXMk5w@`cIy$4W7ABI$Am~H)KpF1|=LABRV=Ce>bR>Z50|1)SBu1=z zrk3~ygD^@71&^&R71w&Qbt$!9OWZ(-0k|r0@wuw0MBlU%;B8frKSNo}cox!bAL zi=O;fO-RZ0D!dD~SL^MpX$TfgNbu<^pA|`|wU53Y?+^wD*!EX$5x23fI!KNrss##~ zHU3{n(D55$oWlVx(M|idDgJK_3}Sm$Ew7$!Gkkg}jB<5km2VN9f$~XUZ%ODK-@lq-(N59!~1o)Fb^A<2E!y`Ix}NRtPVYouK~K=t?jql;tE_zq?PW`l>mk_oo$CWnfh$SGvgTn`|P-nrYZc9HKJb^&g%^>9fZ` zkcwRwZay*$g<>jhK)75rhm!(Gp*lp9)rFAK=r|eVe=$d!TQpRj9OHL;QJrT#0?w zt@KLtgOv~l16Ue1Uu=VVCI2DN2&lb1Q}xORsBP_LKXiY-t9h@n71}#H9Ks`?m4tWy zB7Z>d#8X1kYi5ygvpnQHC%Fbrp6laqz^Yy%seyV_qBK|ikCQ{`WRMcWkQLFKn3s%g zN&o&dWn%olRsHG>EeJp#Xv9!3WnTE?cSU$&PSpttrBzTV6Z(#g<=<)cv{(cpqBQZ6!ECtb>? zceAxYVa(tK?vDuZe#d}cqz#)(4m}@NQ$oQF#-(|4fS3m!U3c6dwTG+jzR$B6~yr^D1(*2UAjDi zOKcO1cuGG-;-fB2wtu^Akd!71&*y%G{NSYAy?rryW4lNZ=SFv{j5Da@td4eBC$SaMUZY{qJV4M)gKI^|-7q&I0_BsXrnwlYhIY`cInQ%l=RCaHOAg+Q|7M z)nNufjyH(d*pfnf;QmjkI>8C5!<|KSwFazp*t2*%tP$q; z$`~RH&&H8D9*f#LjdIjILRMf)3(`Kvba_WCzq^E0tm4#>D zj@a}yeQ24D#YmUFNHmSudJ?Q;DnDwx5Ol|d6HIcjJ<8S@+{CWLgK}r71UNSd&oR2l zafToMFKZzJ#V7`GvFa&(FLZm>ePX;<1`#Is^vo!SSYW+M`44nmJIXA9bClUelOVoy z{d!`nZO$#%jrHma2ub>kE?$1{%g|E13GdOre0c}nt>}J~r|C=?&2$PW%4JoaBLz_l*|7ta)^izbo_UYs2gK6fqP_@*-zZqwq#rIWueLR zN4SW2*8BM@Vz43t6L$)yq^W1|vk#hvBL~Pf<+@T3&VI(%5IO8$iEYU8cv66HcLG}3t z5C}y0heb7-3&qVK)YKBsQ4L?^w|A>)h`q~tp^j5vDsjmQfKDr7%mUX)TLj9l5lF5> zT6{p%Q{2bSvQoS;^RqKRqYaUz4ZA(rlMohZ;KUU1&ZtXT!G}Z}E}$Z@6@c@m#zVi+u&K{ZyD8&JSE}v-`8) z6nPdtzGfx)gUe{XR3#hL27p2W)L&!$y)YlXPsBoq1mP-q4=vFPz-9~$VuOrc%-IP) zmXz;-5<0IAYY0V0cNDng8Qf;1U4jhTD*@_u?0c9kCjWYPV!FQ5A1;J)*4S#r@ePLo$lv?h zlfMBuzw5UcIMQ0gmiZvc=oz0HOiXyw-#JMgg&u4GPMGU3nZFN&!%#KSrr^34M~i=Z z8Yl~&tAC11!s#%09^CPV-{Khu?gJ^SE?-SksJ+VTV!s-w$Fg7SO>X5+^?Mj6j2|^2 z@VxKJh;s4gkfg9Th$5lowN`N&(IpUW+v%_PSuoZ&gmBjDd*$3_1_B;?3LGYA=Pi8AfynN0GV`r zq182TrhY<<{lMSXD7?}#*p=q{FN0Kw+GA_Jv z1$=R|>7avtM)YH-|Eo0jEU4sTK^(sEkUBo^auA;ECuG;nVQP&^<7H*)C+HSxKjmMq z+eO{x{C`ZnbyQU0`u;t1H%OO&gfvJuf~24#ARPkI-8F=?fYKseN~d&4OLuoSLnHY< zbH3-S-}@H})>4?6z4vp+b$u>qD+YGBDW+SvmFl{{{JSfn2CQVo4urMN849yl!HIAC z0X%Dfhjq+(cUS2vx;ClPin{1=i*pVNgV+GZ(z}OEyK}K8ZI-c<(dSaWK`J3NJgQ=Lud>(R)G$ zqMM%IUhkfz&LQ|M0Jm!%9`}%K&7>xx$~_2F{0Rc}o_@vjMI?HWs06+5XG5tjQxBU{ z1B1$7vzzrOCF1d31=`EA9=7G9f;Wwi6Af)#(Ti%jnxb1Hm!2Af|Ema|@($&HG? 
z(L3W3MQJ}r{rSAlXwU_md8D`LuexOYu3K!X1_cPO0MRf0m<+N7i3!DNaRix#;_W8` z)G>(5K4&=6hmFy)+N83W8TQw*)_dv zNbe>^vHOXwH^{g|+TE;wJM17{IK?t2`m5#ji#->$s%)c{^}J=ffp7;50BC>(HM5^F zqVahD&wU7=&vRpX5MKicF}lwFe@iPu;+3vfZ#yu`+spqmB!YNC^JeRS{B@v$tuyMv z9*k@kz!2{pd#m~2cj3gRo7&V#>m7_}?h;YYu^SxNrCGW5bZE|P=6{!r1TsBCew!4DO_FoAL%7QqkrzANGiUuW`Cx5}wsBzcUhqE!!RC zBplcAy%rh=h88#6^(!FXW;E4kbUS3EmAG^DDQCDu!WC-EiUOyh0D8ymIN%`mq#l$4 z<@DMxgn!!S_DQK{8OI`1e@r56pS9EJ7NLQ{G1c5!t2M0;EWngI82C$usgNj;ba=Gi zd=fNnN3I1+#RdQ}!Qzm*0vabK+lF#Ro}RZ@RTY&i8V*ho@Kb?sD1R-_uHXPhJQ~f(iRVRvgU3p(2T~td}-0f|kDp zX>K}Gr^E7T9K#Eq8Ecm)L9Gjv_Duwl!!&-^#&A7B_${y@iMG5<;HF;U^4VXT>R9XA zruJ{ia`O$0F26nEwvDqJGo$0yORrzQDjDtj^nOVUT7Lp$!?va`V$wEMy`OUl5B{tC zeU72rI|MdKgI6EaF15BHYVgcQ*(c&Rbk?sBYq}@*7Tiuosj|AeqyHWM_Rz_j+M(G3XPGaY$=GbC%+ZCj#Ss=+IP6dbQlAX#(UTKChEA# zm{_>+P64>9A}jb$6I_RbyrLgm`VyC-xrfr<6`$?na$^dR()%KB z0c~)?-720GRi76qeu89|RTA~5n&>!C-|D!5L836@)Sb<##`){vWS$P(e7^I+whcYO z2f`ClsN6Ne9psncO3zEj1g*ml+RGW-4)#|M7y2*QzWqd#!ot2rH)V?o90iF5lu`ST zc~0%}OtLl${mJ5IG^ zh$JpLbl@C;yB_OW-4a>xE5rr?zk4rCIoP^MMFN~U9B1)zkx%~KwzGNl=PjuGrl0q; zY|WxtPA}He3r-fW)xpq_&%3(!Y22a(dGe98%wP4-`X`8t)HJvCvcIJCEuVf6btB`5 zdQv}xlI?^ zKQN6N-zRdB`Vt{ry#*hp5nVW5(zo*CG`5xytzU|Ez&Vce%Uk!rf`UI@AYt?G-q2?I zJTT9%WnLiakURW31Xm-@buZLgz)mai>d%~Ilbx7)ClUy#a0WDZa~L&u!%U;GnEDI= z=0=u@@CCv#E?~I9gGmLW7^zRAypRQQ zC5Y0WYZlWjQGI1b%0O=gF#z2|&)>f^5E}=F_G@fRdd>AGFSik1lxxXHj`>A${Qem= z7_(@;q(SP3923ZuVDCeN%GJRw74LU{p=F{dC*9;Ajf9F#xA8YMqqFeUCm@L?B=rL| z=Tl90iq|GbV!4oRh4WvMElgJ3u7b-l4F109BD724+S&1UV0)=+qH+(=ouBR7m{(+B zl-OXQP`RaQVKUhe;5`{}a1s|iz4Bq^Vo~ZI*3y;{F#hK@NC<4S;~Wi_$u;7cY!rRO zPT=m$j|1|Juyi`f`)72LBbti}kDw?W2RlpUVi}^>d?X48n*l)F$yoWMcuwPvyor=N z9Y+l{MCo^NgkCy!aAFl{%metL`m4bmi$AEgA5Js4=bai(4U8oPwxW%8U zaVzN;b2od`>i-?}ZD3cf0*Ikczwk1 zWAMob=;cDJHR)Scl(A*9!N4+Hdrsi6yaTuM?SpeggIVb~PY;#sjQ`0M%i7014@!rV z0so+%%uQ3#@<&$^gXE8DD;A0E`E8xyMB!X%eFnsr``RXr2WM~w^{2Y@3}hPf|KqmF z2L$Frwul=@^+#}Oy5S|U<(o_^oeGXh_+rh0xc9sr_Ln(>F>!-*WcT;HT@J8d5Ga>t z0rAZ^9OLQ}$&RjO{%2KKIf8HmLwPX^5Pb-Xl;aqINuKKDaOmUTaa_}ygwAiiW3RP# zFtiV;D|Du1?9Ef=1??7uz%G!fqENzQS5(@sxKE=^>G7e_EeBUhOtXWaGrSV+Cv=&7 zuSEdm0B4CaKFTJnWUI|K@h%^-T$&AxTWP;%%_&VLYr80_ye6#*+x@LLMUKTNF4l-# z`dABk5r)9f?8VQSxV%)%FdU<&i4>abD%3*iG$21l#Bwc6ayRHXGwP9w4ouYp3~|xd zFM*>$y7)<)HdPZbf@>TSwxG~a6+~bHbOg|Lyxsp3!nrD zE3y=isclw%Pc;hv#{$r{oGCZ{u91FyDfW1G)^#^vOx%l%0;z7T=KtAAEF4(Cfi#ma#S9*qsB@+AF?{`b19%_?BI(8v&@M;G1+~?zDlwG(Irt={#jp!Oh!WrQ3X=C~WVKGW;WTYd+O<8vo6!xMn2)MAe^B z?;lK{ne*%IH_{(ey{E9+`G>R&eK$RF<~Z~lH1;z0hF4M5PUUlc2ASel2Z`t=gA;WY z(9A}8Ry0oEO%4|dyq=Wm7=((w;p%zIMy=s2_-Ul=B*YwcKjO6#eucY?GLw~$x=F8p zb+kwwgb>ug=dn-`pW1e~ZG7486{9qtArfNkg{q6iRVX?{92TWR8WST-n2C(|k_R;6l(04uEj{Bito)#6yn4AH;6BsX+6P{*d{b3OG?PtZ zzG~{0s$_!ZdplyI+R-i{b-l% z7SC*(fu))|tCTVvA%7?m6t_*(CLTNswHF|d#%ISdk`MU!)XIG-eHQGSL%zVh6m8#w z)|Qg!TkED)O13IrT-QA*Ssm?t^VLFc{M3TT!Pn5ktx)1EUJ%ZX@~9mxY#tu}>0g~J8{MH@>US46t8q)qbkSaf6dQEtd^ z$oh9t$*t#plDoI-w`T1Ufb-MymWCSo2~76Af3>-T|0R7N9*59#Y{ zb#T|-pX1qfHSGdcdNHg)gP)`LE@J!Deo*@kQVIlInJ0?Iu<#4h)vFwYE6moI=X>Ja zc@$&=!yn8V2aG`)7n7Y{?>r!nQClNB0DPs}Oppod4dJq+(bPq>#PdRq-NaP%x*kHu zG&~RlW7ye4VFr)~OjF3lD@!39R`u^%Esv+{-&QAz_@79ZDh<}$8TgJD9s%tyV}{qE zRU^&%e1z}x*>6Yc<0Zv|MH>5E?55g8^Ss@%)7L>Y<*m$7k=Df|GB-rtCP4pD_P}oc zP;nY18HI)!gGSd2WwPmaZ@a05-Avz1+)QjEd{F7SwrFdn5;`~$9Xhytnna4X;=n`M zCJud8Uf;_W@Ui{R9wv1gxlk}jd8`jQ|Ls8e&i2%(Lm}^l<=jtxAv(>rZeYRV#99J# z_xHIibov?;gTv{JZAvh?Y`?-{{?mVevTzQ`khaP>CN+C?Q`zDUNqE`(>dj_~p<63! 
z_%*`YEjo_q$rdHUFXVjPa3mC>gYk==cIG}bk7dY8hpH=Ri!x{ELYqB3j%YM={sBzyOvrWiWM&jEGfhBSP7Fs5k}`peeO?Ipq3SA$)9`nE#}m3b{C*>B;lfs_{V~^sIOnnizbm z)tEm~%auCr#3qK*XE!e@M+D>U6T-^+-%93op$bDM59e#~uGwFw`^{_LS2g(;-!zcn zPsXfor}Qqz#6@1(Oo+0nEf3djKUGy{RzU4!O3*9Gn0)TuqX3?4sVp?HcuG$mGT$fSPyRt<1AO357{khW@u^GMfimzCxfZs9$1LRH$xP%;Qa*k6brom zaQFlFZGmM~qrLo21eTcBUsO^w$LoPY)1XN4`Z;%`L@LH4hT8!azkdRDz6nP+QE{Av zbvcwb$@}L3H=?wV)&=_Sh-{q&l;ZJ&phG z+D&L-Y~dA98Y?Z8=+t1gx%?fI?Mp4x$L>KgLVgRfu)h2RbxiBa=KE6uJln``u=wI1 z4)qCLCDqpB`CqI-!GbZT`9y@#npr~`kDT}Ar<`*cpBdlU8>arEYU5QWrPP?b=l8Ww z3-M5KsaR@<5wt@X-;LDj*4iOzhUhrhY^`lwDl(^O$jq@kv51}8xx#?jc#y8T_bC=M zWaWg^*}2!cC2>4TP9H=){j1^UQzHo+Se4Sa__Vo_za-OFy^yp)M1#9B)xtFjS@r0X zFN+w%R4kM;S0#0S?w476kUjOh4{N#@zsY>bqoJVlhY;0>DLmW)FevrJ_D{pQJ0KaT z2cM8A`m~1!aO1`yZh3t{^IZ2hEB3t*-N*HS-c=oG0xBTZ$EES zP`6+F5%L<~vF;gksH=B&FRpB33@)@TPq^YxMI?sbi=;rS zWki@+JS(~| zaLea!B*E^)2nN-;^!iZA9N-Iwn=A>)?rH(_Me(dLT^FrwSgL=vQ~$VO-cj1U_s#7Z zgf%+@3?(yqU%b{3y=-WzXmt2TAMkx1Cw1CIJuVf$POBiI^YFw`xAslHPU*ImoKgQ1 z3;WGEz0r|Fw};<^OFJ_Yq+vOGrhfH2e%HrQ2v<%yY6pwuesJh5iyBQ^y&!*GyhRES z#n+ukb-|$QKTPh5`lvs`o%KII5*zI*LJr3Oj)r&2X`?4JvD1oZ=T|}cH)h`L`5&E9 zgkHdW9%dh(z!F3HYwZe-+AA_+A;863ihnAxkXY_{wnY&1QmIq+pA~Q%xG^SrGyXsqwgn=`-d(=!=O$o`*MZk7H{Qc)%@7$(>ChRMC zU)OY^1kb>%_&)6MIXCRHPg%nrn+ZuatBH=oZx*OTn~*y|=@1qb!k5D6hUNU?YpI8w zl)G>Fm8)~Wm5>V0v|Af=p%C|~hRJwDnI`bVrO(7goc4(0Yh97MfmocGyZ*0pmY361 z-$`W1_giATFZ%GM5N0?7o zX!nNLoOid$B}b?2NqmXS8<#(I?^&Yf!~3nEZ^2+#O(|dOl?YD-(rNLj4_>DY+D80H?r^sK2b-(qn2!hc~cD72)+-b z3%^E?=#BLoII%)Ker35I?v4(u_>F2}C&*x`?97X2LNekqB)r{w8|X>Ujgkz=)X;1E zXn1{XqHb>)SyJxekzwC>C}5J=Iub42!F$<8d(4WR?r8TZA?&ZfNbl z3l#Xc!iX6C9mE?w@(Bl~cWNVOkZX{TmOrh5_E4h7fOV@`TNL)$Z1!w5(vIK#NY63n zuTMY<$haUi!C|P>Y71?W!Pp2AM;#C+IwazOs9olhakFez=6ahbdTP|#>Ho#Etevwe zf>suvylmH|Agg$SuokJ^Nm`jFF(XLDZ3I0C5(6rj&bRlu2cVvmo;sR&-ix)K7E+=| z(Ttb&lRA_1@_o5Qz!kA|ve4E@VZkWw`#ID%(~HGV0Gyqo{>9%S4UU%Ks#C64vU^QR zF^3lfz%!0oqGLepxOVS9!Yrto#buu+zbx@UIjPqwA6xLjDFYEZgsEkP<(O_st~jN_ z{Rb}Brr-0DkpFHb@skjRuFUCQxPaoYN7dv6uEjtsfYxfT!V-1P;NJo9gU#m@ac{Sk zBn&Jl6`%P)ERto*7scYS*KHgC20<)E2B9_%l4BF_bof~$g`cQL68)qO9^nT`{ur9w z%hyLY2!_5C@i~s)HuD@YPj+i>K~Kom`OG|5HIt}lH(pioV;va2W1=VI75+tKgT5`U ztR}lQ9r_`>h8oK-Dxa?Qs6jGWH&fj8l7{l9d8HbHO)i2)$d?ZgC2bMvi{0^1$ObYC zhUpY&4;9#rU$?Q&_)ND%yc5-AV1Oh$P}poCT!*UOm$g1dSWT6-JQMAimZrJ@ILhnp zW{6$IYIGgh{1Z++7RufSGISb^AXVR{eGf$bTsop)+L3$)SJ8XA@Eg+m*HxBmjj4%(3S$Gt?4N^Z;{B zsXK_qRFEYMyIU@J_XmOH*U&`za3|RE#IJW|KJTSWaWzK^KmPyLis|Y`z(&MtKM=R4 z^I|*`!Z~l8-cKR(F*>q27s%}6wXJIP{ES^KUBD2gmqLlFE4a!Y1aFmhzx6)b8Z^rY zV_N&$g2Fkn((~;IkOl8xH_#4qW6`+D5Tg>1ILM^B!97B0-vW>cKP-^L^&zU_V*RR-@1n>{&n zVEcp^3%|A#C;knYYc%8T4{cs6ZznBSaP@UPY(GVT%yo1L9PF-zH%m@&(%(im7$!zu z{1*A2!2gm3Dx7!}U|vStKMNimy_A@+K!#tdAx5)Cl5lSp!buwtJ}?(jRh`)JjdXn> z<7q#67A*>t@h@$uCPJmD8T!_d4hfr(6C%{{vM&B)=3d%J{_Fl!pOA3)JN}vf&y8Ra zQ&q}INea!B^uIf8bnkkg*gb^PdO$m|2u|B$k2sA9VOd`VHW8kgzZJw)!6$?XIIg4W zT8Ih4zb3fy3xhttijm6{??(O^Yz$2gn$wE1P;VT3yQc@9a8yXlkr(>e4{RJ??(Z@K zosEvM7KXNMn5@)cFI!d+67`sTdMPiu_J|4=*s9&A>>3tdmzvJMF73C|U#uZQQxKl! 
zINrPz_Z&9)2dpD}>XSjd*Z5h^V)yjzdbW8$MdtFQd(8Qcd#Z+7$omjPsv9pbkp7n*_PJb^RRuT zcD?9WiOvoM3;CK7kYtb;ISepWsSSEZ`+5hIMR6C(EhlY{RqiAXZ{HhU+lrd`(I~fP#XA@Kjwy{6H-v&l$FMns!4|umr$|DL;XExcT%7k6r^e6Nt_|T) z1nK2q{F;LlweGI&FO*FD9Hm&7=3G59$=xPu*G{-`r|i!bKB$yeW%`?#N=Ax%eXv2c zFqVk+RatV5=7|q#ni)|@EYd1VbT%LQVQJjb5%MBCsO>I22TvP54Cz8G9y*f+_5HI^ zZ#q4@G8{p}vFK!TXo6>0Ze{6%fh1WbTxE(RByofpMdJk&^EMM1i2xH!fBz2Whr z2x_|z_H|(&AE(}ReP%W~3*uKOgq@RMsQ(?=Y*aO^L56k~!;?gq{O58Ec_R*NH9i>4 z=E_D2L$relOxv8)dq(c|L8`c;kc58u>3jI;k5LoA?T>%;5YDe>v^tirN@?-Q-22uz z;RlzY#7S-^BhE=BYMBiBF2?>A93)))4NPU&mXZ#(pAyrxN6Hwz35SK1YhE($AbGqq z{rts)NhVDlwLOlGst2g;j&}JMw^0zD#duInw~?<#6PTl}egp(xyOY8&7(a|KL?Upy z7MvH_P6C|$AHq4NcD8hQl)1VJJZr@zgR7)xD6DH4Qah0&kv8RpdmzOpzF*nn6Xup2 zDetKz`DNc ztUJ+#2_9Ur*zf;rxhMNA?m19(Tvf&(-Z?JO^L6EJ_@%6;9h${qwPpnLX(sp(!54fs zbSaAh!X@-JJ{rogh|oq5V>mWfV{K-KjpqZao363eg6FV<9zP`A_wqwH1nT*s$@vc@ zO)gk=t##2X74u{m)L4;J(1-u%mv+Ej z`P~@L%C%O9XjO7OG2!V~A)eRpGcw7j$N zEAG5&+fcnGVsdB7yml;Vm+zB$z^~|YD8;CgXZ`9e()sFD9OqGwg+W5B6cnQ#?nJ0J zSpAd!wi8t1vf0!+igHo}>d3?hkMz)Ogr`%hAC}1BgLhUzizRheU20g|uJlGe*8N$Tk zy+ZW)*S!zEP-c{){|1E_$AU3~1|510I^Q78#mlb|>KJq{DkgE@P46YUt2hbr!SoAQ zr!V@3ayzo0;srY}M|Qw@~|$Q$bBSlKz1?mr4{D9;bJStbm?s z9nA5txemk{ILON&Jfp7%68vaX#6$nw_%0A;(Jo=EvK|`q7QKqk@@?hX?8ic7IXYzg zwtMncYEd_Z9V;Jxq}3-^lzPwW%2KVn{HRm%SE>Krx0pun@}A?jiQ`B7#vpnYY_M6= z4ZmJB(CdcD@AQSm;xvd6}17G;52#(#3+036__R+#}mzzowr z0AQ$&YV$(Z)y$w?-Pt>kWHJx%TNCJr#1?NRqIifU5NOL1qi4u zzp9vhJJtBo;!D2ayod~z_uzorN`4HR`r#5TFg?fo6pf4K6dmRa(UTmMRtw`Uq$G(m3c3VBZSqWeqQjG3s`W6K(Kg|?6M@v*M!Bp(W8>g> zv_S0Qcn7N6%!6*SP(<>pEO@E~`+)GrC=dU65Mrz@tJ&yg%a&e-T=M*ex#qT#Q?4RV zIGk|&{Wb%o+8dWBhLDH?>|(}9Cz3w-LCo{a<@w=);{qS$(b=4xPDeCPyzS1eu5dwC zBdbz`IMVq3Hlk@{WLh#IY>|KoTT<7>5^X%^?w>5pgO1a<+vp!|H}{g*|J`legApY)NZO%>q!FP ztkJepjmDVgj>o-~&IbJm1Vp*7-A`)p7Kg_VSEL{t*gGuJL)HFVe`;LTIx*lB`#Xa- z)O>!S8hjeyF(=nnhQ8t*5stVy9rc0_{;DaEeJ0ONH00#qkHLUydMF01Oh_Pc&`3Dl zF+DLNa-50>hHRZ~ZU-Z=asv1bN&@)9_-=q+y5ykQHys*s7R+tv*wfKs%d7+ETghmk zP?aaTFdrJX-s{=ODw-l2q~LEMXGI)09!q=|v`=TTYu4vOmtM?OBy#q8$&O>7PK87o z+aeVub!`-DDCH6CJ`5O}T48Z?BW*>L8(<(q#KbEW(5^&1L;n(642B|VIqYKRa>SzZ zc<9IWphLu#su43p{pIT3ojo9%HDoP@c{EJPdAHwsd&GgOm4IY>Y|>DmVDi`|$9P>b zhqn7mzMJiQid8S+m8%a6i(Xy1O=`;@RP^HHkKZdIA0U>)HvJfPc%2jQMV^gF($RGI z8MKrIQ924Drn{1kB*WHjH;-bRX50qdM>YOMSDro`#~ib8<`}KyK@W9@czozY(7t0cVFB ze-XjiaiDCz#yW}367OHCus|PwpfBCzrdiotH&P3Igb9YpDqzMkY_}uL0Bardjp0rS z_i`vFsu!6Db})cSDaahnDSFSuxS^Dk2Q_Xo{8qU5HuP%ri}O*=yD~lLDjI#|IFS;U z*vq@(?hvhJYbTAI0~fi%EY~+Y1LT8Lgs80}tASz|*#)~=?)u6jYb7MkPa(eMAo97h zF+Ht16gl8P&3TbhpQev*x>;Y6v{xik<$o`E7=7izdg<}P;DDTAu?YPoCSt|cF?HJl zaCQpew*@V$oQ_8-9xc>c4yTKon34C;B-5dbWBhq_0L+0OdW}O?;zv&zP4Bl!k)@1e zw3Y6E{t)@0uAOiGX8|m0%g8-NXt`Ekn4zZfnZcheUU+-iPY;0UgVk%S{=7eJD@b>E zv=Z!?>?^1y*zuEfXwr_@(w}6)3KUg=3UWhR;ZZsI`{_iZ8h&%???h;I(kJ~))6t$! 
z_?uTCoVNq-Y9Fft!{r=CP!Wa3L%7r)WwX@uK?1+6a0&ee`J>UT>zahyVy$$(#v9~z z90dHg1XI|w69X?kSH#dB9m~TUaS@+ZDqTM{YGn?we_mtu|7x%mdt|Ad@1pLdUS^kq z<-fo#uWil7b|L8v_-X&odhM7~;ACfvaVKMW&7=KQWFrxI6_ z6(P0p6mt9u_6_Zri=2Q3{~$f_!1K>35ppb;OiZ-sP6r_efCO!Iu)7S(75Jlzqg`n} z!9b~?Do}&t}#)qT8pRQ_8-DisXtMAy!}k72o4j*Ilyh$W~EaKtab+q={JN7 z!D?ADpY;^HhdNn{r3i#PU@OFLcj)ElK+W=sE%p*tQLL6B0Uyg^1Z|^nNTFfs{@UApI)?5&TK!_#DQ_$B?+R@{s(vc0m z8}VEM;)|C^@hv9zo|v9#d)FNwZnt@Dwm@>Vfy($x>=1zO@H|4i4W9A zz=`dH-4@Dlj@B-0s=}awn~s7W6Vy`q{3LDMT39sL?J6mVwnLR^H=JN-gfeAiNj=%s8C@ZDl9ivBm z5CS!JwxRFE`@#=Z`S`yxDgWG2mZBrZDynI7m3oynaOe!^CbRc!P4ojDHj>s368Y7ldmLo((d69aOYu{sY$KodQt} zS;Otu_n_tdOk4-xz6DoOw{=4*KI8A6$jnkGS~s6O;4Nw@A&WS(KeO(Z=*qGKPY-y5 zISO<{!LZL>K?A=G7FF>=x(TOAUC_9{e{R9aU62YV_cymmwS7mUP&+c2vTokWNdkYa zs3Y^@AvF1?pu2fV4v8()Fv`*G;IEL@5PyCil@j?tj6{uO%#B*8Rc5yc@MF72%9`ie z2ieOe_S!>YGcp4vp98vrajiiKJaq51jNWp)1sAd;aOi5@vV68q5m(eH_D~^`FQ%&! z3Rd_&Wc>Mi7pA%}+H`ci!OVI}my&K|!o?F^E?v*ljf(OZ@hjo+z+An4Op(~hm&xhmIhnPo}5Qn2(7f!9&Ob3 zZ1^8;6ED@aXO-39ta?+U$u8&r%suP)WArI!O1g}m8mZJY6X?L7xVR;&u?m#V1XI!^ zla?dI4iL?J8V4MtGmIul6Wqa%;^as(zlV1Zg}CX}#>&yeMrnBzhv8VKVqUX8?E{Ws zlNgRCp~EE9+>{TBOCufS(q#jNv0x;=XjU;T&>B?>n#8&&{b$5COora6_w_?GSH%|V z^Sx!ary*~(aJg)JfhF9sRIqdw;3@OGXuL0~zQ_DLV6T<>a_}pCy=3snD-}34XHAZ6 zpHPP9iK^L~?LG|D)7Dr`_O+8?^Dou8J9r#IlZbZQK0T!Hagtbcedv+e`hwuY*WnCJ z)7uf(xd;byG6(paEa-)LM7H48KqJrdsR&oP9tb)w9rUA%6Y3`P&C`?{8G7VEf>7lV zG$niqbtwp+lK6Q_V_?s@DI?cx<%~(B8F?#s%T!&d=6QgHNsJ;}jEGHFT>-R6w@-fJu>lWXYpIzStYt*kA-EXc8SqCJCOAO>AY&x{sF3MzW76W()6!) zJ>Mn$Te=Jo@+9*8@|%owy`CO{HvfRISy6t^wr8f;|Ma3~O7{d0)02w152013vx?ttY><>k#*CRX3C@6DDUQLI7`>xov4esV81*fgRUbBwT|%M*PBY+ zO8DF^X}wP;ywYIJ#0x+tM^c}0_}PwqCD;oOOHJ}wLBXb}Ilv=obal{SZnspm=hB+y z^3D8%g&%5-zpu-3LY$+_!dy9+4E3owYC3yOFYbu`Quqp}U!6{P%}>v& zgA0W9Bw?j!r^I649JTfSqD@W3g1)(*Lck^&zXF~ZKyT{V*Pn5FV9WpS0cec@JcgT1 zrguGd=B*m;D1AR5>qiI1fwH@F;{b}^ivKmrmwzW0+Ij~B#{DT)CDj^_SUiCIRyfiP8~E0Eg$ZDD)vb%1oS?4_v`GOEXHXa)Zq!dsd!FH74v zX5RpB8tx(8Mz@l9lvC1kJ$kIdXmBSIWoaW$>)&V??tOI8P4-8EXtRT=cDnx{ zvO13%X{X<~4U@qxXT14`$Hym*$4S`+@?+@y~ zX&Wno!A-&a?bY~Lp0Xu~b$8T`y@u>zxDZ7bQP^iy?tLLq<1v-1cW0v*hdltHwW~B6?_T;JNUd z50It!o5`o>bZ@?bI3o%C$N{ow0&m@1v;M?U;Aaf$NFxkY%pvD99|4=H-I)qgUS>(p z(_9UTvoD>Uo&183Gq-1K1e9oFplDM%IXUTE`uDP|D_6p#J^jE`?IydZ0aTvhIFCDU zG%=~6&(|4;1fh67P-P^tX_cxRjK<9L)9}p3C%Y)v2(dTt3^+%%JRMBH4kS7d!6}}v zT`UT9GZ_lyAHk3rt~4K{NV=^0lemL6SikNykOR^X&N`iYxRMwB^i86qW=6CVe@+7i zRN95oFI6X*r!V5Cy$pWSH6i4|b5#FVL*zONQP5 zOHk~x=7A3!vC&~z))DQlDpe#&={d=}WauqCkmtNtv8lVe%)#n#LV*4e)*az$B@ULd zhn!GQY7nsJj#uxsBFQ)b0hMvd&+Sc>>k{=Q-7~{Rw^l*PG+CzTHW2_-Y z+wPoG@iSCV@tKhlt%-`s011n_R5#UgmNZ}MF$UV(2G)LGOIlf>kP6$eI$FM4*OW`1 z%w!ZU^YTD`!&cCwJ%I-~;S-k40CNwJ=Ga!eDJ(XX z2vTnXR*G6ssj=R!=HVH8ekF~Y+3;X7QhxJsvnWwjY}wWDsL(w~(A9&C{}Aq;Mxkw* zuNp1cc?TtGAMNhyQWRm&dc{LCHCTV7oR6qu+oPG}lp!)Y*5~Is*Ua3ooig|PmsSd| zSuyAkMCTKxP7-v4S*fpT*bT??G!hF;!*~(WNJ>$1l3dk#LdFQCJAM7lZHs!un~yD~ zw8x!Xioqqb!~@-?qmh^DRfp_32|^fU0cp^#|%p>flnNGHp&@lE29_^SEzW zyGeA(MM;t_*I7&EdvXMw;>2-IE0fNExLNoXd6jIYC}pzBLJeNtPr-x0#6m`%95fGi zy%1>Ud>U-2bfMhO6YD8GWvXn2;TsXlejxo$COwylR+X~uR{Urd{na{#>6N27ZA_h7 zsx8YHvsyYky@RTR`WQns&hN!D9?P-Hmo!_mZBPEu?58H)@!PI+$D8a++GHv4dOt;3c@gcKs)F;TF{S&Pvt=UKH+|2B={;a48)#1sC z64SOvQ)qLFaJWW9+2aY0naXTqb8z83sFpsn^rSC#a>wWJ_If{i6%hLm)%&PwhBCdz zxC0fdI|USYl{5f0WWCZBKsfdU^%b~t;w;dkRHO!;zb}lBg+cM6Zp+z$8*T&KOo94f z(VuqQLY383pPnej%V%rzz(Tr4v$QO%1D&Asa(PD@EoNMCB@&rq7lw;-8xSY#w>3b% zEpnOi4=NmOd)q=zWUgc1ef6!X6y8UNK}uiju5XgJq7xOBF}K3bTXih6yki3RMMeP!9C+UK z&v1y}!J!vFkgUyeq}m;G-6eZ$I@U-rwd2&hlBhT*3w3KtwU65PG7EfEQqrS$TLwIz zNoAp%Fx~WN4e*poO`OFqEf6^rTTK>D;Yr@D@T3(u`!0r_3}iXprI(d$hAN9(?GO8k 
z>dle&9di2y@+^Y`MY+34n5NHmqQT<8AO_E$ZuNOIdB>ZRcXv-+K|e8~Y~P^J^?ttO zDjt|tMkdM2^#5o}mb_ajO9jl|_hD|y!?JMuaAe1p$VJ@_Qf&Il$>H626 z^HLj5GDZ9JYDZ|-JWVKN;{B)RTc2YAGD3K`71U9B=e_`~RqgfLzs>5zm4x+b_F6#G zobXDeapZL|10qs(fYG9%DFT7q$|eEhsOV8$2Y@kVAd|j)@<-_92XmX5Bo^8W8D!dC z0DaB%3%f?r-vP@@=3FH*{!}+ANk6@A{;)WZaueGkpnu zcZ}X+xBFtpa~T>mdHt(OL$9v`2WP9kqz8S#6~&#pnh$zt-cQMWDhyBQS!1#1H_9i3 zoOSyC_ZQi@=N8`h->bp1R^rRMU~#!w_1E<=wi6tAUMVfXW-ear8LCU_{Fg_Iy73m~ z8Gq+(_IZWU!{P1MCgV-Vw8RZy!~R$m62Hjtmgs(-QBX}jyKV4V{xl-jIZLC|pt<49 z0E+(-y@x`PELcf*kSr7dNhWO_Nn45WNZm4NYTW;STW_09ercP#U_)~gbUX&560^3^mWAXpg7kv_ErI#hhG)Ph>{tok8*SN8T$1Bk zFj%PumA&t$E&9v64}z9=_3+8W1V#f_aI7!LLN5W}9hE%+({5}mFDwrwc$iF@>$%0h zfPwzBx^3$ZsP&;W%>iE-GPgW+lng&s(9iCb&eb<7CBUP(atGQtBkii5{t)_~9 z(Og}BIKrV_ESm}5BWv@n;sS2oW`rFa?tvc2v+uCd4X&$(-eEhOAEV(1;6!ERJgK#v zHwVk#0TEENMg0X=^0kHh$o_)`OoI5NfkVS)+F?i*wJ*KSHBC+rzbKg#XIN}dyACb z@=h@yUH?&r{wx3fT#FM+9$VoIgs(&}L&SnsZL=I!+cne0Jl`IOUNyPSxB9x)p9F_4 z03FF*mB7IEha~SGmj6~=m&>mTxcZu+ax0SZ_E$xttLc4#$c@BGpfyOZ z9{A*)U0KOF)q(dj(|QoPXA|f6E@MQ!hkxrU-^rIBn_$AMpj!B3zEh0rO7|&>1dpC73ZGR|JO7G)5Vo zn&Q3bO!SEtb+rz-$H__-F*wW$xY^2I7) zb#Av~Mo}^G-LYJr*Uxdf`JY=^f)Q)y)MWL?zl80s5iK1Tn|{>EPLATRjzycB>W{q4 zJgl)odIF&8H6$h_W!}zX=E}gFcov4AC&;{ zXu4q5JSOF>@^26KsYF>iqR^ZyAdnNjLg~QVi@N0uRmKe2yDq`417ZuIuRtIqOVt;V z?zP8i!bLoECkbSsYj}FW`&Zyq=K>Eg>x}L)XwLEe)c83P=h_ zH=?A{-3`*svu5vozyJ3u2OnVOy4E_+UqUFD(m>aps$s1|{dLMPnWZq~RnEG7+YqBv zc_?Ez_6OgK!^k+O8`p+Se+s+8&V3_y5Mg=AqM9q7oktX0wh2tKjCjUM$iR&aNW+_5 zC1AEbtg(sT%JIsKA+U+iHL2DW#fBuI7DW1`pZyH1j0x%59L;GbfB^FZfb#!76;ZbTyS%$4cD7%o}=i zlJM_NCs{|++6gp_Acjk;?&-7Xe5kA_g3+agEpOAbu`CcKu|Mu?;%j3!S3>9gL);B{E zoQAc9AYvr+z&uA&>bnpFcRq#8X`fAwxBnU)OmQj47P}j}b39GnEr*L69|BD43GrOW zc(X8ev%T0y1HGJRNlqX?)E1y_om4KfT8QHNzAW^oeP{3I*GXp`en& z0zni?5#Ct%RKy`LJOU*o59T!nQZW#{STY)?Gip2ayfPj=RYCN8LjZ3t6sPzlQ!Di& z^sZP1G&VoVJ&W`yk*0cz zT~H;b>43^#tkzJiL=wlOxsGwa`1wil2{rRs)`#MaXiL$)$R3&6#tPy)1=P!{znsTs zV_lWsN82xT@{FiRAoJlu%!waF2bD!6&;Av)y@CdE3=D&ch?TyVV~-u<=wS6owan2g zEW-L;+kQY3v|6L*6$UCiVzKZg_MQ-L zFKsw~z;U5O!6nQBTNt7**pbZ*nt+6|Bxlmts=j+1ip*E8$@!>iLNtMA#?ksIqeF1& zbn6qHGXW~zqT3|oIa>DivQNoDAc zLG7TAeI$UQ4m3ao?J&F_9DI^1fuWcB^)Uj1 zd5_5y;|C6`1lY?ILDod7s6n+2t+MY57V5#lVu!S}Opf9{XJd(G#PQZ2<#sVTTv>*D z5vF|hW693MZzuen9J?o^A~X?CuR(xS$dC7=(&4R_q_hRJ$eAwM*ocJHYg#6Afr^|~ zP1`N${wh2Aiv9z+Rw4*A8@J!r>FhZY=j@M0QFKWFwXWEUBxjKC>FWi4xkVmun_}&v zxFQ+x{Ue=+qOuDz_tDQjE7srTNl4}>WYRYIoP9)QK{eb)JD^MEg^v@{32`(0FALy* zI*NZlni+z?a9{Xi@H%H*(ay&4pquH0UZfZ={MiH6?iAy5=k4h9iwGYBSsv7v_;Zw4 z_sL`r7Vx;eA}*1}WzDXr2eu#)HnAJwRjTzN%F?2I9ITZ4iF}QBLD%7pywu<$dyDYr zFXZ2d5&K1M{J1`bJhF$A%-|*FiP-O{A_I8GcvjYwOOje@AG&y&UjsokG*HF5*R6PE z^JaZxBXyKGiPJ!I!)h=s)ZgJ-g4%6bl$02k8@`{@>Y9*+76*}Vq;SQ0b9>6+$bzeO zudubId#VjH7l!?6UWf9a@HdZtR7XJL*GSO%&F`NtoCh7Q^|pYA0-pUl*F4XH2E7M` zUHJ=09DQEzx_W4TR{`lW?|R!c*EW*Hos{wgTxoq?U!awOF-2cMo>+%@Xj~Tq)8wmF z^-o^&fcqQcZn-wEu(J4M+p#H*nWf^BUcGK4a7E{5d+-tx>GlN!P)hzPEfrcim+^gH zAF_*S@Ad;O4epp^>&a82yPbG@#(dgtQ^DM*(}R%U=}tdZ5YJG!yqYH2`N&g;+zXaxY($-bJe$(A98n&%b-qG818(NHy=gJDh>D@5_W62SjTgI#{(T(3PT{dE|RIlUxr+?hft5{ z5(oW;*#LpG$Hq_)mG?&e@5HUYG?FOeC!n)c9t>%sZT%dgQDnSlao`3qXnZ3I%%rIJ zxdYUJD;GF}_Z(;zaNk;Px!0#+;v_qn;#>N~Q3y##lW^l%5@dKn=}_>64?bGpTRaT@ zHN^IR(h*4%m?T^kzOEbTfSyjwd{kUxKOS}Fx*=d;VX;bi2=M4?FpcA)A})r2h>+ne z%fjNYF|w%zS=>7d7VXZk;bWjP5b0Q<8~K5TFDeO?26^BbBiibfH`n=UCMErR7C0Cc z-#F;(<~Vjbq}inm+S}*^CO9*_{dCqC#^%wuMW}LGE-H`B{72-Bu@!wWjsfp8Ze&zk z0kDy158GW7pIttz_HTckF=2thnEfLikAE{lPPds&`fuoaTiMxn?shW47Dkg*UE#$4 z9cw`mQh~8t3`>*+^R}IZ5u0xui|HhlW=5s*pa@KE!=~(wcA@#%di9;LluEa-`S|OMy{$bB%y-xNkA+yNlZG_0u6aDIJpI&y^PcsX 
z?r_hpi;{Y?(>T&R#gI1qYGvPvpgpcB3dEOYrZ=K*_cL=quadMC1{^>d+eg_t<<0cI z70@i9F_MESlU73a)x7H-tbMpSZ82(j5-)OF~*)R}2Hh>1# zVsM||{uJ(N+}W)xFM=mh;rf6?g7d>`JRh3sp#K{O7Gwj(p7vwafxTYnY*WdS4kcs& zkP)MROX*U4gqh8Az0gZn?RRyGOLX?rhXwZ(C|;$3QpW|AOn#VVrvginOyms*G6fd4 zRSO84f}_y5`%8U_tXbfdACU{wIsI&2e9Rfj`qwuRM{kcj@^7U3`iA%3ckW5~=YLjB z9X~wB`)tQxn8We(@9iqWxrr@5wDofSbjUPBKy&mPyLSCpSINnN9l|e@nX0gp$aa75 z3!^#9KIaSD{l=uH9-X4X&0!t8Kf>dN+8G_}K2Lf6Z>2Wcpvs5vR6aj8@4x=%?5)YibazrU7j(EqbCavdh!6l3>5iuF>+I)Se9K3D-I|Y-w3X(3L zZ`>`|L&~pN?nxq+c@&YMBcs6X!bNqqAVeI@DEb&qxT4wQ?XfhZEG?-(rX8TZ`J`bV z3_NPC7V<}jC%)ALnD;kK6MA%sQE3ul=e4Ci>T3Qg#zWd1R=KP_~URI2zL0#6wjUzEd0clh-{X1kSm({c*l+B%ltst7%TxRHqYUw{f_M~ z-YdeRTn!SqM#u_RPDt6VUUGp&L9y?2qE4BvOKwK3`vz0awvRelrkH$V_gx?bFjQj&0ap?NI7cDO0oP>rX_GkgCDDQ#*a)v*g=Oay1u#I>eQ1yOuW zYFCNxNkMhpB$aU88ZNt%;M;Hxo-sQytyDG%o!~KCH^zmj%0GW%=vpU8Y!jlDesf4h zl2EM7PB`#ZUk8Xh&Zx>MM^4w083chOui%?{;ByG-m7?Ky&G^Kri%y4wir_}!MWb@j z(ylGUdY7I~dO;eQP)qaISPRph>rn5DEa$soGew`wIxe5v+vvRoHzi^YS&QbIu(+KG zsU|_K zFE^(Kv0TL&wPK1|b_t6ijYGCjQeyiCeF>!@3s5*tGwuEfD1)LfFQfi2dx#e$JFe$F zCoLjX+;*-J9B3WmUXF$e_mm{1fJOg&9+|w&@T5Di{w$9J{;?|hs`iF-Y*81rQy_sg zHvJj~`7Rqts3x608Smapl5a-|e$B zziKSvAOr#tDA%LmE`x>ovtxN_sTinRo+h279~4&x-1!gJn7DL3SX!N-F0B|mYby0& z>&Y1#{p&nI5+L|?EM`tBWuIN{o1a(6)6MhKCr{T?{v*Bcp-4@0G2=^<^sYHf1(_)KpBd+Uitt9?|!Kk$_yvLQz%eyS*G0j*L ztK!uKGNlCyK(Z*TU#x9^3pH9TL&ME35e>Y9;OVey0VxWscoXAWjEyi^N0l! zJO>V`6jtppFg&gZgv6ofzo-Ht*dScGyv`m>-mPMd6p>1oVAuk+?y;QO{M2J>CC666PlrGUuO^>Nuq` zXr!C?Q0xNzPj#ePSL9`H4!?6BW|ML%P9iC&gKnw-4tR&U4ZAv&LSTs^fYAf=IIY1rBJ zz3Xx@;E?PM61cmylaHh`eqC%eU=?Gm&OyFeeza8ctp zoeHGI>w=_#cf~CPR}GWv+m)|p8=9F?+*{3$H--mJVv@5z2J^WL(FO9NGLl24 zaR1QcA=ll0xc=bXuhG-tB`I>2)O{>$Yoe0H{s&#tFL8b5GXJcbRuVSme8T!ioPEF! z$>euTbix3EW;KLo*HK?yeT1TOL%>Hg*F2oT3V9?=!^^ALxM#^i#Le81sSd7UM%;a5 z>K(G;O<`$HX!!*~!bn<4pegk9XNH`FT9)kn?)ai4O*|*rU7tDbp6NH+YJ&`dOf9@d z-O5L6qn;FpXEPP=-91V^+w4qKn0rNa4mI1+zV|fF>VQg_NOA=T{7XaRAS%V1I>nywBXI+y-v!Z0 zgn3{?1c_-)8_8U;ajT}gY9M9h}t7a;arH9`uC@4qv_+kS&G zML}W8qQc0TL$SOdQdJBwSW?M%5ewv%WSByC1pFdaNN zY2J}qURreQow#$IetGh%A=^H(l1C&0TXZ>p{*|9&8kg}(olwua=lbnZ=K@KJ(~!;cgRYp`!b`{Sm{_Un9b4 zflAro{ifT8g9!qDIzekE#!eDsFScWJo9!lxZ=Z~7U2o!~Ro_68b{F`#881GJn*T^$ zA5mYXS$E88j303H^v#u~s-fbNsgJfQ+7eS9-630S2BezB*KUknXmcqE9IP`a3&Q!GxQJ z_$lEEv42}}zwCEy2gZZR+1(Q6q8oI=P|0$%?7%2GGZ@My&Wcz>^Aj>1 zuA|5A(n4-qeW{LKl2%FaC{T#Pzg7hU8M!##Vro+58}Xi&l>2AXH19>aL%?@yE{}JU&&R6PVm?Hk2^BEUJSpNb{*wp0 zU;YF*FRdxLa_OlheHVh-mggEYuK-7(b5-G*uRim{ktA94&R(SUQVu_)mf@L71cw19 z^@9eka>w%Rtz&Dwp~!4&7B(`BdZp_UoND_im+@>?hsh&F;cXe~4+&{hTF3#KrW8se z9q&(~#%`BkLI5>J;*$-2MSS|`jdUnhsG{CYM*|`zqn0Zu?Sg?SdKr;k(9tx&J}M_G z>w1eR)?7KWh{0gbOK2rbsJp4pP3w;Err;YeD=XJ*3iUD-b5SD71pXI!G!^L@gp@?e z(|FVbkAvTh8!uExsvHt6y7SJwT1n~6iFnw24w)5mYH^Av4V!FkBFMRh&X?!Be?Pv7 z*zaPym019$BiO#%fo!z%3HEiC^ts`J@du>`^?ELMY<=iQU}Y$J?hF(r*e{(H`}TmY zjj(`J`u7q&>X!?JSTe@K*Rb!jbwznGF;Vr*NV%2h*`v?R1#;0NK-q^#7I0aTg*@6v zg3aFfD)76IWEM^7sr&drG%T0PCd`ja!vgB`)++Ut5ZK}aCI~P@GElq*Ng10UrHlUf zZ4_Ms8RVQ4XQT)zulx@+m*t0~u;~(Zu)=@pYog;qK;Asv_0Pt>opCE?ngbBaHk2dg zla<1IWL32c)~Hn7jqaA~#2308AorN)y_l>}pw0X}T-v++^V894uS@YyXYLqmnx9)i zCzHXebEE4;^Z9G-R8uz^Lu=&ZUQq2!+9(Aw(fq)hOcM;FZmxClJS1xdVDo9}cFs*f@0jbX4|i8$}G!b^xNZ^A_!|coK9p z&~7DJL6FT(!k8t#D%eV;ywEpjzLfO+cR&qw*ArW(KJxNWW9^T_^D>y>qPF-5)MG}) zbT$%#Vx#)UZD@Z$nwF1?bwueqd+47qW~%1{fkW$`C265rLqLhQyv>@SM&L|`;yMl# zsf?W$AUJt#rkwqe49d6*@C`B$E#M4hXdKE&{u7fM>K@~WXqKDsx@tlh6m`TWe75d_vaC~Yt;4(=9&VOj-6SO-GrmY)r&nk1V;3N1D^`QzNZ0kzB zaWKCO&WuumZvt72#baPx)*YHZ`5~D!;>#w=3^w@Z$@KN-e&|%ExKad#V#f+B-?)DT zZeVXpqlA&rOtc?Z0dRHmhm86^FZVxL06{|vC~tWWZX-M)VCA7i&DO7@C#0ATxV5C 
zpwO)+sfA!Nj@h+t^79tS@dm*l1G%OrlY*5rZCS9tVEwwUtDxb$$0k>OVG>;b)618P zyY(m~{e;v8puk)Q6lz^eP5EV=s28 zU~sn0T^-v*8QMUzvj9{H3>-4W-@0;76nfi%B<1o+zsfJ1P70V&%o$~Z`fWf`k+dCf^W~NR3&$~F(Fhv< zv~53Z|GaeBQ$<{8ydz^O3G?>viK9%!YCL{aahBvqnQw!}irfwQs63>fTOp#v;0Jk# zemO(QrJftyeT?SyNQ-)Efot zsfMOwmAf5Crd4UwDl4t3YbW!0au{o}tuyxgjGN>x|5&s9M%&@krmEk{>##)`JAezD zYZUYHVB7A9TYJ;s3pfU4=^#VQpVW8>eN3h)B9UJ)p^x!2#TYb@7+wDw5cyJ)Q<3-+ zENH3iG#ZcMh3D>N-i$;iXLn4D1>9ngjpvK$ZjXihsClhA|Mayi);m$cUsO>vkt&$J z*BdT5zgIubh2!-%!)v8)zgS|w=xKS+o;q6W9f!UD>XfJ2svUiu*Wc~!2`m0FLv$os zKG%i>ix=cie0~IeHyM-%$Ye+k6^m<7fj3Y1Kqd6+J9%fyKc95_fw%(w5%i*%1+poJ z!a$f3LvQvixuY8;;Jr?aTH`M1qjpTKq7~1Ei-B-Ct+ZfF; zfz9Eh#;4(Jsds4<&+B-O$qPCE0mXm~-T-~!gkt~Kfl281 zVi+j%ZQ6n{j)9~$I_}Bi1Zcb-88vp#8^K<;`Y6k@nXi`ZhJzqmy6w<^sZBXb&Y&p0iOtCM*IAQK*0xtQ}fnti5jM9WDY-X(9hMaPG}HC>l{@GYYIy|lcw z!?!=utEN{VccHX?m@(cbW^HR0^emaL1DUeVV62Hp}&T1gwz1cSW7kajA_psvP@{->1# zx=s7#g%8UAX;f&w(eC`(94#sV+dU!E8;~PfDdsdM>J8E-ZT~KHX=%bghXQM&!rs2K z+Z{M%wE*qc8It|CQow$U!)_=K74WlBRDj8TycmP5W(@QiO^Ny|Aw&3d8kkRG;pnX+ zTn99D{x{zBo^9Jy=s6l^KxZz`*}M`l7TMJ(@nP$LvwKXGx>arMPd_G&7Ca; zqZ!`I=57~VUT$8K7VZ+S!;7??amxRl4S^)+3;ASJ-)3Vi8X3lCk>{yAwDpLplk^Z^ z5PC5+i!RjO17JQ1*R=0w0SdEw6^Wt=1BsFDXLr--_mcZn@7Vd@aZ$J=?BHE{g^9C0 zj*J@bQ$D+$OJJ*QiMqXYr)P|-V5u#1s(bNsn$+mcy_3n`iNbf2fz6-S1D4+h*SKm7 zhN^D<%s)yNGku7sPp&#y*1J>@oU5NLZ;fws{bpGOu{nkjov8N0eJ?qDc>vMypRxc9 zO-KXNh9OVJJTmF0@1zc#8weicn!h+O`iUJf2ZI*U!5#*C&bWjl&XieI_A<)lHE5aG z`26nt8k|qR=$wcq#Wwq#8Be)dpb59RO}mBT5C7~-3e8H$jl}UYf4`|+JN9_g#9D-I zV|ij@_CRtY7Q!Vlw{gi@KJe zpWlIS3)!xYj0zc@Btj@&LxAE{YcD zJW*yWf_?SfWm&!fbO<5LW`WcnDd4meft`$sJ_yo~GXs$xuD?`(LocEse=6>4iNO4J zS->Z5-JK!LjN~e3f_T;K%42G!$`^dgCINdBE@b1^dJ31&jQ4PKMNhVW(WP?`86g<{ z+0!mpY6tQwsopnN~F;)XgIT{ow{6L!`5QLc`&z7g?Jc|PPXAn&m@vRP@A+R-W8Z#|= zB{47=QZB`xO&+Zl&2&*NzOMSZ9);h09US)5PEj?EDnD*H;5{a%;mDNLoknd|z0Fg@ zPwzJ86q(MfESzLHn=kZ4&Ef3K6sODdd2c}}l_tO+e!lAD2Gpi4Z8+BzFhq7haEv5u z`X5k;Bat{lJ69mp8{xkaghqGU(G#Oe&@T8pg7RF2GHSpD^DQ%Y6;+95-2r2g;2=NM zVU`HIs|oMa0yG`6(jNb63&Fy+G@^(4;c)v9F{>4VHJeCuv#p=%&=d$Dev_M=EchX- zaNdqC7!&Kce{IDIB1aeUL|q)@`uO^ltwd_h2{5`1Su*{RS_hLK%eA_@71pmRQktZqHWNyl#I_@ilf+_#PbuGDYG}kw{ z`TGa#Slj9Di;g$LcH`QAGitJ0%=zrJjFOms?~P;sK0bOGCls|SHkang7yIwNBe_D0 zf`3J`5koG(bx2musR!24a%h&7jOPYD&w97-9L%@Elo1XSY*-TY8+GT>wb4&B@HGzA z^RJ9vYr5s;yEFA@s5Kmpey|!(CZ@-qcu;pxy)HfVJzZ(6R$Wwy+A>>FJNLO-h0yzw zkvwUgU6=j0u`_D;6R+HVb~zEVD>xF3P~Ah58XV;|dw_iyQ?(L#-|+Zu9VzH0<-Q! zO~)h3$AEYr0uy6Wp;`U_rc#9ea_&n`Pk`9z;fEz}bhrvEl+)c!Fj z7BH)a-j0D9<6}f#M=go0nE87iwT(6YmbP*6xj{LBk2k#I5>R%G@!%k!nv4vz32}7u zn;MjYIE2h#a=WdOitg){8l;F{AIQ`<2V8D4R}@f#Y7mwcN}-VbmjsHgG&3EaS)_+U zs?XYSxM5q!hO5!F?K> zGNby$1zs4sG!*J}a2_r^(bPsKcG>zH)waU%YiliX=M{vZv^^LvS8}FF=%rSSO?H}& z!#ncxWu8y^t2qw2fhpGK|dzX})EzPZIJUOVEhBP0DQHq3hJv94y~LT@?^ zxT*i1CpcZ+e)r;J`4pR6t0`2HBv6Pi=9tGiP|6>?Y}w!opuU8s^hcna#YfjUzDa)( zo)haN6I#}n$V%I>|J_VxQO@y498-3%4BWDhFPc#pXiDJT0QvGoK#^u;@Xi1! z?z9vKS5&|6&l*`d8Z)SwG)OP>poO7-f~ib_`-5RFq!|E5xlF#>?`$gKo3{S=tO-r! 
zwxCdoctrgeFP0KIG?5C27Z_B+UA|Hm=QwuPy9`p zFENxB%X%Pdlc-E;7)3%KvTP5khDL5d7N6qY!K;*qUBS7ZllOLpQUNi0PKcjwgA*Z) zG@h=+YP8ZgNNd?KANm~jF*7Ew0S$p7fYe;$Zne?d2~gcQ)3>2#@)7mi3zuykPt7}_ zv^`CSKiIiec%9PIeO)NL0)S&3*cHTSe+GRGTLyVG`4=FbVu;n>1Rx|oeL&1omQ}iM zLO4McxPMH1K34c4bvtZ9XpGM0=M!0w-*tqdvC1IYceUm?qN_AU4*8QTC?6Bm?3()bi znQ^+yY99&sUu|Z6eBUgm#mke;ur$;Bsq#jplX>qsxISj3zqh6E zL{<+j*MI6D?|$Ug-HwOqCxG{z>klR?1XaNdEamR2DR}q}Ji+1lwCHa4Qu^atL*3$v z(Vo}dln5Ko7Jt>TbAHE~>`dd>ZNPg`V6#gUZhK39H{#gg1cNN-avpj)T8`L8i?azv z0>VyCOn-ibkCH;9+GbcfM(X7;b`xPYalvy^DIKU2VUnB#rB4UuRT7ucZ>*2WYVrTj zLkryKJVirZp)EtdeTjn>WF_aDgR4wIvZ7=IEbxq0hDlK$Q#=d3zXj;Al%7j6oUO_& z&gXUyeJs%~Q8`*8HvY)b^+c03pknEG9q0@u>BCD-x1BV`X6iRr)^mmI55USIG~VHb z(Mbe%hp=L-;EV<|FOZeh(OfffJj<<$4bo`qIL_T9uf|y9lBy1A{aoN<=-g=57W}*3 z&3c{JNBp_K^w)Nbd=>#yZaYVNjWMfYTC?saTf0=Jf2uLY`v7jn-s9qDMj8C|%jr=C zexU~u-P{dn+%(2TyJ5Cwpv*JdfuJIh?*be~Ei`RyAymYfLhp<;>zrTG_Sc*CoW5y= z?4=sM&VReZhoA2x^`paY&KRck&}!KPtJ198q^XbthP_|EPUSVQbUoMPqfE9EqA@KO zMNBZ*Ac(It0Z;+qsxSdjAMz|Lt>?VE-}s4=}+Xr4W28Qs$K}+xd^LkZFi+*eJ2{oS#HEX zF)gdWH*>mJr$Zin-@oD-H&X31IWcs8nNU3|&%iezexPd|xy>WPco2M$^d9?-{;zxH zj#+dt4RqMWN8t1=FyxVM9nzOn$anREJwF}ByIjt`+lX`8R)WqFcvwd<*Sd*NBATnm z*giTFb9&jcb6!}|XNys&pQ+Tny*Kt_kuP(DqOV(VT`UbtYyb#?Q~|w{$|Avo9!jD> z8vS~@R(0&H5Ux6Hj;NsSraRxP=!St%*_{!ApbtY1OmE^iWLtCgUtF07x-hqQt4hbi zXj_IHAL?NmOLMEzCjGkT*D)wmQ_x=|8$6$ZN%_GNO!1Yx)>UzCiiC~TOM$zDu=`{~ ze|A_MwyrPSe>K)8cT!ibF;;v)q23;#F2AK*h92SUKUm}nz4xF1Sk|!L^_!7ya+|cG zT5$Xp9htboU;Knb3TVeqHil)UAcXjx93mC^4`By+vVEzXF|o(GSIAE-&` zxtAxp>knM(U(X6d7O`@;0dcAUV7`0ZPZ~SSV?rze|M8^Za_cj$;}}OycSdpb z^UPAdAWiujBk^~kS7p!Ah>`@r_|I-~`t@Q+2H)$LE!;-~gmh&U^1&NIJau5VQ%6VG z<2+@9hpYWWhcY6_irWURNeiMfYnX>i_WRPX=QiboAkHzXvrB=>3n6(P0>GMZU>34f z-!f8YhU$yt=H|n`AnS67%O_rsmu2@UV3tt-n}C3 z+MksHzw};MU37EgYj*U*k*Nv`o5A^j;cM%hkJ{v8%EdDQb3SSJTrVnico{c;K1y?Q zmjMSJa%yUXNhXIH)YuJ|Q$*5z5K@BI7#O|>(Dto7${D^a22xUhbZ?s*XMz3Xp4(jh%g{@hSKJP3yVtP^1VyyDRxUbyd z*S_I0s;gWY8mG&q#6%Py-Pd@uIXrK9V@sliJq6IDcdn<#;Q#Gk^%5lcm0qIAt`Z z`Z>_IElnIuh;gs}vRg!@4iDEcjRV)Crf8{4o{~QANZ7AyhQB;1)R?0U6*k%7ej#&}GxiC+MuQYa@2~~`x5=dZ8+Q`9I0yaYNEo+3`vPp{4Bj}$c;~c_ zv0_km2~Z&~iWGl}unsS#d|w^cEio7u45H!piQxd`k(_>-`%V%i)vcV%2!yYy3hTh( zu-`C}{xfm(#|gUOD4yz z$gtI2Z^IEaJ5&3!JWx8mPpf zfe2jylvD(VTC0f=uum97S_cadQcR(`PKf(I%%kjy7fCickgfMVvD4&P&e@0*eN~2G zI*p^QcOHa`rS3)3y1H}_0(~ES_|}vnX*Mt6>B#|~)0um*NN@EX1m8#AIlBN4hL8>M&cPQWaCc+)zw)KQvHR1mJ}nej|KPPYQaCT5@}Yz-S`0zHkmLyl1@q3 ztuw{hy@XBh10P%FMijbEE^$J0>G;YR$+!6B)X*e|@(nZfe50j?aW9y zhQ&KqXXX->hWYPxE@XDcZv2#Dl0Lf0aw_}n)y)wl;Stq+aBEFS$Qp@#+EeT3@v+B^ zDI-_tP04(@1f@xf$$;&}?3ugwo%j@=V+O;!o8k`mdQfcU#%HoWteb`Gn}a#=W&cUA z5lj&HSywj(mSHCCL3d(lPUx;cF5HJ3D_-GjXB(KyZbVX-Wxe;jm<nB;M&*bSd;WYUpMe5h$q>f6K#NUUF#l6o zU$B~+(~%S`)d}iAsGgQO8qCHiYS#8->JgsX7~qlJ|NPRv%k(3qhYn {)|`a+cX) zV8N_%n-GOR#&~&Tda8CWn7gqYBbo-cAAmlL6gUyzKs{1E$XI)nI}v~InTBj>3grS> zlJv0L8M=B1Zi+q+$PyL0#ngOGkp?Ezho;_fzS$W1<{$wJ74>yz1Oeekr8nn)soSFi zS;B|v4vMWJj#(g!r9lLuuXD^tu%NMuI6E5p2^)bVP%oPH4*G3?Ua5lu>LQAe9`OW> zMX~)Z64PXZcq%c2RGQ0^oyjENvU~R3bN}Jvd2go3g-1Rsy#yp9RBEkHZ1{ZwE0ZpM z-pD`xjf9ZEnEJCyG%wUkgL|y`Hn#V-Z{b`68CE$eBo$dxPy~r&Yf5p|a}(s?U5|T3S1Ii!GhJX)YO=2CY{p zn5sIa&;Ra<2tL(wb>I*(y!%fVhrofr*TV*IG0QLUU|da~c7Yog3-w7_&&?`G1xk&2 z$AZGch3QfbbW))5Ls01B8SS8-bq@wy${(1ec?ZJ8fwJ+%015e&LJRhtx1 zsKc-7JJH#Ct$aHr6zPhnwZ0U?tKsl%elmkf#18&yKr0OKFyWMDk2KtM+P74so2+}> zh$-`UdputHU3EA=KBaInkj>L5oAN3u3ty2|lov7LtCBZD7jjV8S*y@r;?Sxi{e316 zL7kH#O`VAv?;yI2B3d8r+%i&T%)kR1%iTPpo zDY!jEC8|`-jR9sZDM`>O^0dqp#7xr&NEAsnca$$@dAUE32ya=s+MFyk+{kyFX0^_5 zzt-gRdfk3zm2g0XH5`XA_?1I0T2uLSM4){Bv->Wk=g9BZ#wEUKpx>jtrg|eK%}P9} 
z(~SeF=u>e`6{9ZpE&lec|8n(i!0o&4T3gj;FPHob$UfcuheTvS7nt(ULkf~jjt*~Zn5^gdOUOTL%E$Ef47{h>T+y7hNJZnp%Ci!4@oj&{srNl(MU@Q7eiUoSt1BPDF&qFq}S3qM7@Pv*ctblL~QO8uEwt*WBDvVn2K)BThc}R?IAVf9`bk zz9{ogd1Z-tm9y)yp%;EIR^Su>r+gg+^vm-K>4I|(!&;T*P187Gn|H_~NDxUW3Oevv z%h6~P1bR}zSD(eVM&UAGAzDK9sS)c}>(t{3H3wAai74pQ6jH*^8{A7^c)qM|(r0DF zwx%TW-QI(^wVCJ7u-&A)y;Nl;2ARD~JYm3ZA#2pl)*i#wa9# z8?--8#F)dBvO}HhOd_QJ{dKdQ+IseZW&Rju>Hew|jY1p+VeJE2ORy`l<@X@fW7xE@ zWKwN*d-Z8WVR2eg10bRP8~mP}qW*6Z3o5EL+{o#f@vryQou1P%IPgG$>1OtO+8pRF z57fh=!k|SWc|FgbKt1Z#QOQLmDrE1140o^a7is!;a!Iez$6sR&g=DHK5mse47R<$Zpz_y}*B zGHMq;RoIY9bf&3&j!2R-{+Yi%^+V4ZowE{cYeeH&%Zv~!laq<})`2GAw(77cE>F+C z@6)Xalv?9Ywgh#gIwkNGw9G$BXq10?l#jLPlq1sAD%+fE$e(s2!QUL&>;!j{t*yOh=>PBsU!eXnIQ7&*i9Wkmc!J5nlOaKzZ*brRcPL;a{ObAHHxcSXJdg~ zxlDpZh}#oHmz-q?Fd8XWsgf;N;ivc<%rCp&4D;)JbJ5PIKF6Bk@gX-Jzh(Ooa9=_D z+>1nYt0A~!?qe{;&J)U8JPNDT1wz56T!jXupQ_#F@sQ;rplIP;T$Ye@!2LNzfo69~ zU9W0}njf?8=6vqNo<#>t9*Hz(XG!E3s7r{J>xK{ZAw{A z+_>8@#_}cZBJtlmjtf+LTM(%AfhG9qW-$vJgiTHkBl&e37r!JiIlrtN?0huK9dlnL-j zfC#3yjo&9q{jSel&U~V=&DiG#vR{vx>QPA9oz)1LI9)h*^LlLQknx(MqpBxR>6aO8 zG93ZadJIqjF?EWGhB&crNe4kNHfS>A=d1f_j>yQ40Wiq)D#_88KMr&SO6J^wJ+Y9h z#$i(H$Nh{ehfG8R#t79GS#PSK!t9YJWu@((_uvU;h?kVV-02Jd#!3v4h z0Y4s~cLr&RzyKO4enyg` zjelpr(}PXa$GIV1q)rezn)YL+bgW=efCKGKN9WF~cuK z^vXe7%ES#TD=H~qQg-K0c0aVlak@Rk{g9eO>R>CJ>qEQ#$_Fnr9#P(-7vk4 zwjDQIK3JMxoRtMnt<(PN{wrklgKt{Qlp+q*w4%NzTSsRTls6W%7GYB9H+(Uf;5jb- zN2n=5;Ow9IRnrtH0g=W>W!wujd1n__$qE^&PBKA#+j=j8F@Z@nBLb6V8;__QW}`Rt z^<_u`DiUnW@B?9q4o!>!meN$1y$Oas$9Xl4X1b|-x{`P~_*c7+KWq*=PIukEiwPlJ zdV{+}6#WurO{@Cs8xb8y=Hh9ydq|FWiOe8GQCiIlBb1oKPlB+S;2GoE`}TmKUf{oGoMh;Ilv1 zbU5&Wkc5dgU%m;XHlY(yPwd9b=PC8?p`3W(2JS{go6Lxz?CGnA7+@ zgCWQ08EQ`qd1&J*9cLSA(nFp8d`hwD4f&{1 z{wfemmGU?^Rcj}zT=#RT%BS}n`F~(mN8r^_;-T@AHZy;eaYa-6I=^g@;`=s`hqGF0 z&Zw8%7P{jPenJY`x?#?0)m9t~e!^l<+4Vm9cuu%2ODXkPy+{OiCV#$B3-<@TfYSW? z7)PVZUPsz-?Q$FMxBhI*TvXgsSv81UJQXa3j8GMFX1?xWVuel}0c&eU)-rFMk9~B$ zmj%Fk4*i}D#D7~~(Ep#M-|hcVbyi_ffBn~{gh50alx`%XyHh|)MG=tBLAtxUySqg} zKtj5^OS)6KyWTzip8vsndB7!{z|42{{={1M!fI&`sl zsBP2Ta$k7UCd2JSc6*qPfK2{Ek9m7knheio1*RV}#7Up33Y#mmv?P6XG`{6*(*3(@ zYzOsAkz4=j$BRv$$_?qMIz^k6!aw&TAsm&P{UsCY`QJaL4EFrIHOBZ<+l;jFIy!cU zI3$0QOjAnaQoi9Oy`a(yE+RGmh;p@MpN*4u*AZLat~TF(EP88rb2Dr`@o?&LHWzd* zKR=hu$rsT;)7ZQFp|h>1Rrxdf@nM^h-Y$j(dPcGQO0ldEU-FD6jZ|07WNw-VWzxju z;a;Pim6{&A?zKv%e~t0b%esqN8{GK*bEY8g(2T{r4k5ZAm!IGRmmJU%N3Z<1i^ww0 z9;tp*WSnv;2=@AyLTX&xJi^5hVY?SkDlah-c}qh{;5~cg>j-_*P7VuN$4(=epDklM zhS++LBkE9MTNhHlFJ3{x?oUY>N?&?XmP+?j^h+7x)v8)Wr?xypQFn)5`P5`CshkG^ z$0tCki7`IcwxaZs4S_|valX-|%Z)9@PUEYK`nrxz&A?RWaW*PVVDVQ?lxle3zke=D|!a=KBzJBHC7A@SR33;T=eSH#m9bOSLz$(CjJl8iZfYXPID7 z^Fq~q&F25Ir=W~+ARXsP(U#2!$%#sklFM8Ixep_bhm1C0`xqXRU1mxB;e-0Bh$Pu} z`7y~Dl3VFs9)QupHV`w%R!Zz*aDG>BBRI*85k@L5P#0YA4c=+NU3!bT$NKVs{uLv1 zR>E1donm0r_iwmq(okn9p=m+mN&t~T%u9jQNcQVBa9Sk(LLMb8@Dd~JL@HN)H}&o? z>kn4!F@WaAi3vgcR9Ht)l5y(y#GZP;RDz8XkH`>IY^xsiMxvC(TOvdh_hvUH6Hp!l zf)@*~5+j4%HSt-pzvU#3bX1pYPtSIsy+3lj8474L{{30I>O!t0B=dL@BnRe-j43R6C2K?P&92zA zhOa_biMni5bb?W1WJz))uaHbp>7i&7a5vY1B zCgpN3WFK@lNS}MGl|VhaoqK%uhuQGz^W;{CkYrqiOjw8Gj+ZIUj{SXUUg^PIc*H%4 zTzoxVp;7LSku+gbHKFTkgJbKhF^S5H*abV;dkwp_Bf_&SO=+mWewNK(DBB7fUHWuv zSivh=YHsIx_ENOkP2u6dS13^%Pd^XH8K%}0i}M}MoEqlxjAq0KsxfMPIMEWk>gBI zNB(^PJt0=zTv97Zqe7#AZDLREwF}2GZCvA~D%GT`ObQBA?gHTI3U0dv&lNgGF?#HW zj&zjK;Da?Sn|;Xv!+iO`Z4QMgk{=d!$PJvWPD#YyXlhJXx=6X zSTrw4zLApUJzBdHsxdVEwL(WwT4$_Tzh8_YAHZHr|F&;FXXlUwm`BN8X=tBe`OJP6CyKxk2PRxUzDPS=4ZGDe`J9=!{ zvs3~0;+KQ0$FhB7_QdwysDr{KX!6hO3em2Y7pemk9BbBeepTz*RtKPxZ-TKAH;7BH zUXnY!-3b2V(muMSv2SVPM7xziY;nb0u_>h7;AGw2AxCd{1vW0Jf_X1G! 
zjlgL2Lm{lwYhTCU0Af*Jm+(X03^8}zw!oG2jeua1)h?qnbR1*_pj$ytl~nu~IhHcH zev);dcB!DUhlt_YV0v|Ml;e?esnE}0+xLAas?Z?Kx^uxXmG1U}OWuO}1q!^UfJ!TG zp3NnxY#&|PEmF;kl%K?Y?fF&}2EO1XT4|bBNpp5NvzuE|A zw`i_e%8^35Hd;=ToeAL5rmM?LsVvGID2nV|Nn52($FQJw}R-6W`ur4*~M6;dGz(z=HE> zs`~HmJeno}U5l0$FRZ0;|}8Ik|OgGq_MZ>v!nC4mT}S^`E$6eSMesFN8-L}wrEUJHly}q z8MV<{yPtNf#Qa^|yv$5Yj+R*Dd@t2LXfE z+3iIPwyJ$Igj0crAaU+@h5;ACHw<(_PRt7A5riizsHy9_vMO}E;m30gu<1-j<&?>@ zH;u;F2;U;*t*B_(gB{qRslFHd(D7Sfa>~;>$;-?`-X}t$wWMHu|FnER$5#3Pm|@p5 zPBTSz5h{PSK4ai~JT}OLUfymbYIm6N%|f+hewo*@APfiSQU}t_JKLo2JM1Mv;@5YE z!89e9ZYck*oRO4JKKnR9mc(w({CBWX6Zk>>;Oz1upa>|Aj*gy`ylG?x;L8qs0G%JL z2|NpTqskg6ppRF=Yys37Vd8cPK+J=Bpk%+eT2FYgauG#N&*rt1OBc|0l_*6jmmeLA@ z5`fH8hS-yB@M|P+&m=#G?p3JYj%)?I`8+{?hLhCtdI@EhY>M?s!T$-OogrBw5r+-` zxyZaDy!IpYgxiMP*=pr);wGOhofn%B;=ghl|N6bc*NW{B&Xi_^IMDeA@bw3shXA6~ z=QdqEl2&n~o9{=iX(i`bhflrLv<0MnYB8Wl9Sw_srfbNIxM|DA)0K*WdRn164jtIE z*A;`Pa2|ko+8b~XQWMiY4Ji;pGUd2dC@-Rd!Yt{*GK6{*p z{QS0@5jMC%Ih64yql1q>WTi^`hV{!>Lxij&n2S=hJ%0MpK8TbrAf)i!6NGFvZ-%G7 zleFVa7wXd8h|EFURs3)-M>M*n?J;D(`=ng8+;SJ5!BfrIC8^}y{N%BUdX+iN;N8-& zU4J^y_}<~>Na}lc6RQbVPRgG==ix5Ci=l+_r-WgJxl_bBioYid_5PqyD|)ScRnHXp z*18t|%_1{|vuH-In?&f-Kh=aOmX0BgEYqQZc?hQitfYDZgo39}LI?vWqdFmFUEiJ+ z!uc8~A&}#HQc_VF>I6e{5-@zfK1&m$v`!JO^4*q1PM)`IonU}aH8xcyKOOS4 z0k`nARCPwX;7b`cRthWzXE`ElwZqgZzg|h=fu6RwE+8~60IeB2J&BvvDI4y1}I za^cnnBwD0$xQL4bu!p*EroH<_@a|BQ)33#kr6UGyL;Udx3BtG7C}5flF;P$Qa^5TN zj#)>f#VP4EOw-pRq!u(~HUxa3lK}u7>wLz30FxB11|0G<&0!MjX5dh(d>fO#9Y~== zJ;WMV2l=w8tHTfNm)gOk_~@WKX_AI>xA1i5aLb&+Ns*sAp2vsl2fY z^J^$C-Lx@oxcCFwYD!94mlhi2Qv~`9jDMH8-e>KY-)KnzLU@$L>-B zdKK?5FI%${0K^&hf=4u1d<2UBDC>TJ%QgUNG-+;&d70wZ>YGbt*GNPUtV&NgDw6_r zN}Tj82!?$GyJxdaKx|dV?A@yjo-GMBIHb#Bue2?CZUq?(gF~&vWM#PnS0FrIFp;Ra z0=AXZh+jT%VZT*#GX(>$(Y1RflLNPZuk3^x>-p-pl5| zv6=6j70KpExQGhcg{12{)aBbnlr3}!q0BI^uBGT(W!c@cqm< z)|Kfn*0e-S6Z+7|M;S+-QN>SULH@NQducCGsv0OT7U8sv)Z6tT`lISMV`|GWf z_x>IMw3}|pUzO-Z<1eOimk2$?NbvvU@YxF&*nA8#8oS+!JbZ7$AbxAY@t!oNhZyV3 z<4e?o`4m<_Hf2NhA zDW!r2WeY-(DxABXF=$mFe>xoXJ<(E$d3#F!OD0>q()iJ17`i@tL^;7WIZZGm5TDA9 zB^KU5vj3E(S5RN-2)kYn?lZW}lbr?0F)`#~cyp|jJ(7wstfg$4-1z@`d7C!E{sp-WXw+5(z*%ubqkndR# zWW&I^FnzV3Ka${#W0QX=zsOUDZo$c7;H(Y>n_+U0tDuo)1Ww<0Yc60C&s6?|CJ^&l zWkn&bJbxs2$Owi{P1u-V?pmh=;&FnP6 zi^!!GZJs#Z@=pkXrbg)77us2PLe2P6@uWLa(Oh4#?c{M+i2`^C`JVVS-9*-Qy%{a$ z5_}!A=m1GM3wZXD@_qIYE~AQk6T#n0(JWmgw7BN`i&Tv{&l+v8;!`U_3dT^^D)%b- z-#xhOi8QjQkC^;tbLa5|7xKS%(=8H?8(q9i7M~qd*~x_8;_v^ROOSnGrQ;KQX*n1G(4yn%C z`tUqWRsGdbT_Md}R$pzo#7zC>Y%LouTct$(7d-*dV)^rNkQI!ImKw(9X+l$5-j4Fe zXiUPhDsKpFcRUdt`|>f@rar-0KT@Bwz0Ei6MuKs~9Z|acz9Ru=E8D-t> zET?`+oc$qbuDEEF;jiL}P;b(Vhj}@ho9|enO;h?^?8&xRlDC9K+zv1YYtP-kYE?1a zzjwg4|7rhvNkEE*>a8t5&NrD2H%UId?N|E+;_){9hqxp;a=W;BIP#-JFRm#x-=Eve zR=e=*^Z!m)=P{L1HkL0m{aHMC9{+dX!s)`1l*h4{K(R~f7VTVPCd}nwJWlvK?`0dh zkn+FF4h;(~nMCq>z3HV|5@$H<6lM1UnThOh8N%NoNC*h5Re>aCFR|)t_Mf$Ps*@nEckd*?>~?PWy*1@jouhX51qL~Q8>3*wUH}0d9U)Db+~IsJ zTHPP23~P*m&*6BYkb>?DZSt0$+Z(5>bmk11(hCcV7Zsv z#e;CovMG|5PAtlzN>o~^nguu?1O-99&Q+H{$LhhYvkeC81SdGF-pFi0T-uTZ*PA~DMqPJmfv7^n{_8$Y+DW=~IwKVm;#%ai*$h4dO%a;RQO5TQK+ z0f)7c)11{2x)z8s@>JBMc1o%V)Db5su*?2~;S+2Nee`;%8TAM=1zMAV7_B6pZb_Hx zlMR13gu#fUpupf{9b6qm4#5$Izp_x&t~+Q7_`D^31$-fF*ziB+_3Ja67fF2lj#|Ep z<@q`+V!WKx%u)A0@Vg=BullSHP!`khkG*JbYaF7N(5cBJ)aP?OYiy1 ztHai9OmE4cv7kd>a5rR{ey!ak7;Ut(p5a(m3g}L-@uRg5Dr*l&8KhW3OL``up^E9} zl@v~fw8jKA{PKG|hqn1elmIl&sb3+M2O+)1LdAo=km2x&2E%cc1x$}{+mjRzwF6e6knDo&d+ zjLqw}(C#d2R*)U90VH=KD_vJSeJ)(~1lc1g&C+3STG#1!wQg)(0T}QFIO-G8n!TW{ z?BkG!W1rHn4cDctx^=#Nn087U`8r)}mvcdQt-~IytIQRkaQ^mcsNu<}YnPvPdjai& z6YpvMR_o&flI_dc%cu8GC!4bDIdQsG|B&r7u7!NZxU^Xl)}6IGU9ELG1(OrnUou$j 
zyR4fJp4`b>PIp-8BF%H!FMjvFh&@}ZAKpj+v0YUQFKIFAGo{=O6}}S`g z&4cXbc0yU~YdQ0U>T}4$`$VVF;W)<6Ls{Au?N&GC7B0^sE?7X05!w8N3nuZoXg~c` zUvQNX(WI04NaWNBTGPomJNU<+WxuRvpY>r%&&bM5K)LF(?CF7A?qF15fQ)PNa-q~_ z?X>H<)>%HmBu@-lWYvB$uq5|aH1Mfy8rrP)P)s!>kLxH#@oXf48aowmCWenT6>9@) zZb{8(wjgseOQlYq-v>X|5%kp`Iwq-i!^RWt;l?*47KUv(nua9!IJ7HL>u4MpW>7tw z_|Q0@xCuY}EPJN$L+M9fBRdbC{Cb84Y5IXoJ8ZJzrEjj#OjV*#N;0T$sQFX7bf2eg zO|k(6=c51I#5N?7C)r*4GKKvU3x{d-qv6i)+Q|V;wmCnks9+{yi=2>v)H345sk{(; z`ab>oyh~OiJ= zREx|r8zV`$s~q)qC2$&qxj`?BJUhz zQ(zRh=py(Lr5ot_nb1Szmwrj7BiU%yGtlYGXRDpvm z>EDM@w)X_n5YcqcMwj#X*{u)X(Y(`R*Odd)qXo+daB7`UqPhqA=myfoOmXFzhy^xh zuan%~BAzNf++9nhPJ?-al&zMAM%rA*>q>O7{ud6ZZF#RNH(-Oyk0t6{qi_6(vxR(A z_pCG2uBey+yMqh@%aZ#T$R%`4T@01;QOh zQWz;v9#+aZbpx}a^w-fG{|5al4~bL)q+?b-Nls>ppFmR;Qh(0_gsp5`?px`OAHb49 zVpT}u3mYKYT=UJLry7I)N-fyy-s4FH-%fD{3zc=hsvlxdU-}mxdMq4?B`L`d$+pLw znN^ySM@h&x=Q@i&lq7k>4fbCcs{J5g+0z(laP%nlVGGC8dWTc72B45Qbd`my5*AfC zxghq@87tCMy28X8jV0c>zm;7i*K}ue7@Zbb?CLDQ32Ci?uP>&K%}_ zdBhS;eHMzOD2M$J`APqu@8F-LbJDXZ(FXq}R6YE=Y^Jbqno47>a!%86fzibFaNWns zx596EU3Jr*ip;igWa4XgAdrGlKrQ_{3Bs^I^~`u_z|3WgOA;#dc12oX zRR2~a9~&>EHWw0US5idXq519$0Tzkk3Kp~DE|x_HA)=-R%5KWp-xmkebXil^F?mPkn>Iy z%O!V&lh8b5%!E5wU^Xr{woviIIXjos7x&YbucbJaYJCY#OvaZ54@8!KySMkA&*Jzt0pZ^3fI-;Fgd^k18mSIsbcKo3 zTjj-1NwJl);G`tGM;Zlr?^1eQNj$G)=f!sAoWId5o@v8T!r^F!;=iVh>fb}qgQMkG zko9p^N%DOFuF&;FJS0p^`w{rS;C@sbje^|qbP5rX9ztXY5~VzU>ja&=t+*i%ev!!S z9tR=VGf%{Wh?>NoO*db60G1(X)?y^fJMWo+5XAI*bnaI3yJGze=ZW1tUs2Fxt&`BV zEOh~h7Yq*XZ}lEXS>WOQ_$&FyqeQKq$LxZ6AoKp~pfd6^4?wQ#NIB~u!HFWDq$&pH zgI?R9EzZc~D~WjKY1Cugk7?%qr}N#TP^%Ep0IZRl zWL?NOt|;f_cG}6}R_6Gok%GyqOA!x}Q#K)|b=KDPsc|=A4-;Ab{)f*c>NNWXNWb%Y zQE%8-^+N|ypwiM|FDSIkQ+GOldF}xlRXloqA+Bv8`Qqh7xtDb~*%W_Ae zwH|-}@sqOpofwR~1&l5VU79u=@sD02JyA)Bh#5(xiWOFTYBmNbnScI=bVNje4j-An zgPB4`#6T0Ct(=e&YZe5DzsPn}hS(S;?p3y@Q)bW`qvacFIEX{Ou9JnR;HsL{rl$-J z`No^TwV18UTn+iXy8V9H)h!NETtd>c5L#i_RrwmoVv;r6@h%fMJ&j@tVy%)O*}v`9J_*84rwYGzZ+zYe!(h>E+KzBU@drKVtYG zA8QXizTT~lNJhKcaVPxTd{TsvFZcd6ZJkXbT2wuANIAiEDR$fDYzo#3^E$R&j3@5!3D|_#wU^xor8h(Gnj~Bq4PB1oS z>fa&q3PD&L&fVH=2_~u}cO}otYutN3lu^ss@Du)c!hkMftJk71GCjM2?*vut*}bcP zh%eR6{*{X4ug$|6TjteFnjcb9Z?OJ)kynbXs&QN`BD(&KY3o#dDPB{P$=VGyXuS;{ ztPCL=_C(WGqstSrc65s&ou7{rwiJvUGK(yf#;DBeA#NzJH?CPI!ct5eBM&A0<9=-s8*pCMxl{8x|h-*{> zhPwBtr)scl4Oi9Bw?C%KRn#+WZ#=hRk|!Ray9U;dH-G*c0*a>p+ULN65B6GCTC9m2w%yc^X2dg|Yc z18r|dV69}#*(*;)smIGkf0j}oLcqd|L(00Y4a}%e{&Cn?Qm4UU#LmQ zVHB2B=|3ZZd7LHIi_I2;h-ze|J4r-KLP5Umtz-V=&;;=LC39(-7RVp<$F2YtqFn2r zjwv|^PNstVHwKyTym6`(JKMDwK|D=0#fouKgUV&H@%GM;CRA^3^6YhLihB$Xo!EHM zml_r-1RI)5UOG-5=wB$5jn4!m!X~ z+ETHehM>baj4Z6?54YLE`uVvH=4Ss#aUyRnvb~%n!S@q;RbTBkp~U77>#Q3+WD?zx z8CJ%E0b5!F&at(8dNv~<2o*II8bK=X_7s00S4;^b%kWfXgq`h*kj7qf+u-c|VXwy#^vo2WFjoD=DXd`mMs;AQ~3c+SM< zOpRH1BQM{$3}XdW3451gF4AF=S|gcYNH=^*8u$5tIv^{+g;T(@$SLCJ5tOvKb@tzx zedpH#u&b!e#!g!Q;6XlQ-e^q8%fq3Ypn!3JT&bg5fXEtDG$BlZzrMXfm}hA=s~xC7 zE=H{BFk{GG(}AG;H6IVd>WyT;c)auJyWHKFgtz7r_QHF=eF zk;o&3R~-trjC$XqXVI~FPm;_`wK)+CH)pqCKXuK0WEX(EZx4pr@>}EdB1TL0g{81$ z;PX2V#bkCVTH~x^G;{cY8t*-QM^OYudHEb6-XYrZA0LY-TXn=yaOkIxeRE+8r@Sz) zQz6N@aloCuM1;AvV7a^prcpOQWwF*&O$S0A@W&Q`nVO!K_Km!NkH#*p#v>4biMQ;J zWFziRm-~Vl!&|VE&#`Q}?%MtY=!m~~(k+}82?P?&r(HfG{USnWI_Nj%xf_dJ(#~{F zVf|UNGPCt}%Nr58Rjf9#neZjXj5BCxpYZX81&`9nboX2NjE+r*64y78)k@*Vct{22 zm-};D&nIUt>PTYwOu#f}9p#=4Xy>Q>;Ke)@s@fsA|9K_gGpV9T!;6HK{E%V#lH>?D z8L4UBOOQJwJ3s9f$maaY(qYIh3Xw@1bDf;-GCljS>0Xh$N5kFu6Ms{;6tw>ne+=kd z`dNgvnKoE5A{DoITSK0<{rzb?`#yFiaZKNuXY}WRE&YwZC;dZk^R`uK=W6xkUr#%m z!Vp_Gl=O9I^*)41NzS9bbRl z5bKIFT<(0DYw_=FH{eE4Emi(^wThDKf2ox~L>%;|#EgRo0_-j)kpE?V{p-bz%=m)U 
[GIT binary patch payload omitted — base85-encoded binary data for the new image files added by this patch; not human-readable]
z#s78>n2v#dU(@c8m_V{&(kPoSZR_u@gMnQ6cW$Y=zbe|86@&zQhA_}wyL;wL8`T{u zi~KvR?VA<%Zsuw~-KVL=ppvaj%JW3>ybNaisk3}--OwC`f0dET+-EOW8#>X zbk`0?E`Vw(ggoVzT3>TzA!$GFLbwNX&l6pzv($;3$wl?bk{&CE`y39dbj0r6D7gk~ z^`NOQ4bto#&Y7=ytGS%|>b%lFe}BhipxsOex4dqt1i0`kRLAKxt`+57UeK0<)-4y` z5(;9gK~sI%I?fn1^EtK{jo?<{?oV?+xuNW(+&x5%pfd5{N&0 zJ!iC5|E@0j*?r&Kd_09TwK_%P>rnrl*cYBUR4PQ{K4}4-meki5odZ5}s*p`WLc-a3 zZ;tb=kvFk>cXhRy|Lpa4&0c)KJaN5#&@WP#T{hL1f4X`6GQv7O03fCucycCzFplhpZrkV@O_`Fwnj&JVe6RvU$$ zpF){P5l_D_2OsgYs() zdqbP-r3OUfn)sM`_@V~`?bm=ZqEImOp~AD#Y_a-$Kk19y-kdf@6&lM;7yQuegb2#u zW^Y)n;dhUiYh?^=$`VU|2U4%Bo9LZLHlzY_E*% z%ap1t{JSwh+| zN|+G5&~G@Ls}(7T-WxZx(Z;Rbi2Rpyes{jH?s37iZH8oJ4u9I-(XV?H<6TvkV@vhy z5Pw7w&E~f^MxE6kf0dPyWHQTAYkG^6g>=)QZ?3gKXy*)x^kR|Z7LlHQ4abC0>0+{% zi$291jt4_1=69!N3Z%7GdjdC5V_`|n#m;F(Tt_pI7SN)~LgqLnSm$n1|C2Z_Tf zP^EbiSLmEydn;wjOPak%B0aROaMVMI0VU_PcAwzM>vu--Mdad@GWEt!8|kSyngV#cD*1q*qu21R8IAQ&$}&#|U^Crt3?mx7AO{rrv-ewGpeSQcGtVwhiB8e6>hz zsJfxQW~vnAY@)n+xQ=>vW042`pm)RsNu2LdGld3%eA?!_OoC3}PKl ztEhWA!?Nk0=08R@%8wfDJspOD>nX-7f33DeNjf2B|I5Dkd1_Q*06)xJ1lQlQyKm*H zakJKE5$Z9w-@}B=#Xn5ZA70JTLw@$z4*B5E0IO0HGUCE*`4Q9Itk~gfO;_Orq-X`m zwS#(v{A@BGxhQLDOW_Jdc4{g$}RjvQwQPO=g+KpZ+k%^Vcma zveKy*#R-I4gl<%XDiO!t={Uz>hQM~RJ^>bXJu;nGn1^hep99u8C_wobTbr4YNKUs; z>Mfl;MqX-DMI_1`20L|2zeqQ z$BX68=0>sI!2(o(1Slfa5dzXIf4nG$o8_ zQamz}aQ4vX8NA@+@cv6#U6LY*F)+Yplo~NyY60Fth%lidq2G6;XIcX%f<@GJHE$)F z(DabD;>5PvvbK!*{8OahUuYc|bX9WNc3+v&6!o%Pyj`VM!b-ZkAJDp@A+X2RkNw0k z)=V6-m_Herj-?h3+Kjzi_Zi;`!<@W6oy{X54X{fp>9J?4>J&Pvs2f-1GV&2YkSJ50 zTP@d>uH2n>Pqpj9uydzu*fO*|Fag8-Quq52TLo0=kFD-847t+Qu&J z&sYjR!1ZWt5_b?28(82zZ+R^@JC`3X*UKlfTTlY*jX6JW#OHs)WA<26)&MA#o!eoq zMDwzuNst+~GyMx;>$eTI$}?>F;h%Taur9trT4n92yiiXCOw0wR(KKlM!J3QlRcqjA zdC4fJh@SCv_1jebzaavs#v{Bw*XWS?80b{wOrrO9fe(8&G$0+`Jq7Q#r!K$Lm%{4wYr-!dnsa&rf2w112jqP+8#6 zwP4Hep#18*Ha}$?cv(&bVN(% zL0RO=!S3UhlHqW8taR@MZ~>t=^`34|c7XN+2W$v2eo79raiJLzIP@*o@33{$aPmo6 zGtjff?m*qj4Y}pZB@%^lSRraNZO@nGwgEb-ApGL!r+a&-~A{1aKZ{1rG;dPF+7T($;+l{rP}Fc$!# z%O<5#=1-X#4A2OrI_pVfNdSvfU{>fi?1(7>WZ~9C&ag|B$ia2DWqbMBM7sR4VVWI( zgDDhm1s}VI?S)%qmiz6Wg$p$;?yMVj&96?0ua>DLD|oR$1`)Mrq|fGDXc8HO30(-3 z^oi%mXOxKy0FQ+%Z(|kX4WtLILqIT0qJCpe!h)7%u##|ymG1gqMzVqV3*@-7ctMC=(WmFAWi z`N-BvkC(MmW^DhV2uS`B3-f~`P73Aotkko&V7rL*9XZ(}8dtohrhPhJTU~&|qbfK5 zYV}yG2!=7yAwgzI(r}hQ1OcxaZy&35FAgM{<$ou}`$=+)#&o8*wl(JLdaPU}t^u4Q z#NR{Q>j*WVH+?=BBT;@0pK;@hk%|*J$aw`ci^mMa?8BPAI;Pznpp<;A)%~awn`Z=Q zPnlDCa0qURSN}Dk~F47A`eQwhR#I{!IBv z5a5$(W>VI@Kw1s$h&#z>>A~m$Hv(*)eSaFum4?b&DVZ1xydVT&?l4T6tU-iv{=3w0 zB0;_4-5wGe_uwGdbG`!A;)K(mOeC^gCa^>Py~Oli*Qt-t^pcpI`xF+sfkT8$r?>&f zJX9u`Rmkk?0qVa;cd1|UO-f6tvAmG)<#5l>SL2sJbFhnw%7E%Rr$*Tf){=A-cf>Wb z=B24pLX1at7}a-D z`2-4M{XO&~v>eT8@X8nyK$(fg2hG~-X5--65oTu3aoifcN2_1C&LR)rznvj%76;2& z*>B?1r3w@mOM7qGf!#TSWBeCx#t-P`8R^stSh}Dn90=^NiLn^{Y5y?dw~#_;PDwWD zv`tmmVI(m<;0fEJ^db!8r>I*+C!ClM3r>s6={j+<_db0oI7;W0wxZD} zsUR&2klOV)=;e%+hyI92>38N;kbPY;BXp@Ya!R>oYi2%IQb@&}^xYBt?&n=N4l@Yj zu_2!!qlH1B(<`Rg#)?Mgwknm|Mz3DP*6^>Vs0Q0D-bMe@rTYe&o!uEt(v*4air^da z!GpRouKJJk)Kwb`>h%eWLrv#-yC`#suu804t#Wjh;x{es+QY-I{m|QYs(Xyd4gG=H z7UNPr$=s^#)EC|Ty-&$@kcS*^TRuoJj*{P8ZN7=jX;i~%KxYYc;CAta>K>Z|w`^iK zwBIE`hl@bwAg@0NPIj;%mANm-HoT`W!M17J+`qitmPSI$OGWmrUq!AfJS!CDf`(Q) zV^u%zMuX09O?oZ+QQ3?z{iB1 z*NU=5+EvCOPOslr2q^ga+HLH*56pqHvL4I@XEyZb?qE9qre0B#0-E8a+o(VCfTO;e ztHvlh#oZ=h(pAS?1G$`Bo`{5JSf@`veQ8RnKBK+WKFKQR(Cvs%rdAeZyq#G2R5wp0 zlWv#*YPhJjY<&M!>i&1l^g6r^=Y zq&MC+T&BU$d?i34b5wGX&)Pi)a=?^FiR`q$VyWck^!`pdUK;ty7m(8R4Ryh~GZR2@ z+a1mnPGyR~5ZwNG89!U6Y8-C-)cJh1li0D=IvN4BP(WX`a%)JAw4yZ*4 zw$nup$LKfO)!bEA&M+%RFE!;tfJ#}zu{>%(bncg^q02Zm>t?EF4o8*E&$@7;PB>xB5`{jMt#Ur`LLJc 
zfcg$aHTF{{CnydSkO4nmxLc`GFu5NtNys|c?S<;6z(OI)roz@wx2=T3oOE z2U7sM{m~wer~qP&fYa?1?jl}|>=#EX+OKk-V_#n$k8ai;g)R`ta!%qL?TynP%LNJw z^b*JXVQ+p3h`^Ta9GzuLy6>UrD4u;#+^~BwK0k1>`xYQdpl?=*ET4mca%Za~plkSq_bj;{Z#HnA+;aX#BqRFhbyR~G3f~CNL=M1zR z4_wn1`b>t`Ja;0A3ZUSyA-C0_>zgfMK_H=~jqn)1mJs2eg>PEU{4|McYN3D2k>}1N zPDN&9StPza9M7QADA4DRv|C3dTiwQHq?E^q*Rv(`tJQ<$!^Yz9`VAKu;puYW+*9&( z>a>I1QL&!R%*fgfJRE*9I=yAi+TnT>r7;brPkc+U1=vq(qk%sHn#9_zD?sVZYirGB z%QN(lg|I%rsB6)is^TDZs{(ut53!U2zP&dcNo6ht2=9{0SepJ9GLI|;Pstah#>FF9 zYlA8MgwZ-CRYL?&LqAnnQd3h$&6c!gQ)a<82=l{8x#S*hKR;{MS%iqsV|d_rn=e#k zp3b9ILi~zLub@0hIvKNde^jCAOf|`CNk!NT!Da6Q{xc8Pcm4MH6&HbivFs&G1JC;w zvSaX&x(=V6s&9GFfF)M78BF$HRs+Xxvp<4e8!dj!I5x~rncDsAHG9aMgvTb0zR~Pn z%6rPoh=-egORz)PiH+Q{7)}!z8r)Rp98SKw6O&ewjZ`4(@L{IpbJ$ndXcK8g+ro7O zI^KqWrFO3OGRl9-!gsWDe2BH1b8-c-ART!JNQ?Im{S{OR59jTM_-R5DG>HV*zcqVj z<)$R1X3+cRLQ7j>5$^za-yal1RD+Ae8>meXVb)aZsm1R}A{MkKLGzpGBJ2ieV+cwOthob<5OU%+ zsoc05y-BLik(kF(uScp~yZqs_8qI*_Oh_~o!_TyGE=m9!U&&nqzpJ zEvq*pen|8d=l27$NAZx&Ur)Inql%B?d@dWYeC}TCYN^y_>i+Q_3d`KUN3e z-c5hZ@e)-fl1Z~mhkH^g2gsF-blgG<4=vC5JKw>oO^k)!tVcGnP;w(Kc`42$81DYn zJ`xCKQBROlgnjm@Pq9AE_HDn!Ga;UgP91Qc|FPQ1)D4a&S_N87;kBL%!)8j#V#65- zFMI3D=HnzDAvKzJtZ__Pi(52094C8dx@CrctyL(D|2ku~OB*jn_{l51{P?qXU0 z%xjxb^U{)sX_dr3#Jcs;w_R9!PX=ZvE-uIL;MbTt)yhtz|7j2U!VlDBv|JqGN!3oV zE8cpncd~5cTh|;SlB=w-KO5z(C7ZNPT^Ot(m}1?1yur(lONSy%;LFv$1N|l4%~ki_ z$bjKj=LF+ZunZTl*pu%&EcB#F(jW7Bq=_w}Po~2?eyqzI3PBn%?MBwb$sC#s?^3@4h8+SmdmYz}!4OPoIKwdEajL**ZR@zV}u_ zeL|dw>;vBd^o_izz2$#D(MK|xf3K`I7274+)V_>7#+?(q>L`Jdl+AJL<@m|%(={1) znlTf;c!o8~`B63$jW5#WC$5E7z?fOT=~G$9+;*jv96;^Q8r(sQQ7lstB__Sh4c@LJ z(|v4{QnHs|rm=M{vk@16#^n&pSw3~6+mST%Qds_9EPVg` z?jeM@D*LfAS#;7%DCq7l))DfX;*@)`Vi!~IsI1c!2 zIVA8v@rg<-m{?UKpDlFBM>2~_n#wmh#<+h7_}Y?vEigHv0MKOZ5m#YWa4oe7xauliI@mCj7#(} z{D)lqWG|AUXgOo(8vPTX2V5F-WL(O2e^e@vV>r0BK(g23Z11L2Rv_#qW)U+1YI@6f zkL^^R7H(`sK6K&dSYT?Ksq?+{*~bV6fVvfwdfugv>3sQlk_E-MX5!=5?6E(7B?uXL2qL;ol-PO959Z*qp zytUVLc6fky*^$M&(};IMp}cm@QZ$4AK;4ZP6?3^OSA#XeI*@X}GyNrr)8s}s)uev- zvycC`-xX*7)A6D{yu>%3HVw;$x^4tu*JRCFl(*&)0FBGv{rSIYRvA|bW?pORp78t- zHmhfEY>p-rX~_TaBZ|Pv%xE&um&@Ppv!%nME6}()KPY(K0X6pP+RmrzA=iCcoIHcv zyMz>TtxSRq+C#N@@me0Lydl+>uoZDmF{sO4e$^+8KDaAST=bihp01}GZWWx5{o)sPsGIfJ;IiF>zicCSsXk7# zNJ~pqRnT0lNU>3S!zIN!r|?$@LKw#fb&X4`OrsKnpm?6z)pa-6qV^#L{NKYTu>;Gb zW++Od^ia4oT@6c)1IeLcg$iLX{StWa5NfsJ5 z_jmmYfRT2$H-XnUoXQP*?{2-IbkF&%98ndl$j_CbVJ9)dW!kyUQ}^minLDv_CKgkpR0BWE^~*+}%G0n_t`?yZ{9T4c0coX_xs0f0K0FqspafuVIzMIfhALE^?fMT2cq!?gS|rEvS$GQm?01j9tVxqDbLL~|y6tHo zl@=b)O?YY#i)0#|lUDAeJ#p=7f0sP|%BOer3QFf=?!SL%e!lSS8i)?;!2aZUPkwUd zLi#&qJCS|w0IuR`cP6^9bhmLf?TLR$Un{DR$~(Y|%DcT$j3bKMPTC55J5ud4F*~C@ zQe&Wzr#HPT9353r($~P*uwKcG(GITM>X(R^hnH@iWZ%i|6fqgz6CW+pdjhC_ii}>A z2ccLK2S5vy=I&Tpv1<9;m?mTUJPwKaP6>26O7tqPv-IE;#f^L96@IjHAfmvc9LqK$ zcr68Qa6Vq;f#6O!?jzE2e#RM0mK}I)I62h&59$8Gp*_WUjvp+(38|Cc`yy%yHF#u_ zwl*!Bp9s+HLBB7)E?seP3MG&j)6(_^Ld$#O2W7|o%xeef=K$B~N6ge0FJP@w`UDuZ zasi`0$RFvE6CAQsZ(WXZ03^QrFRpLi$`To%vu=TQ6 zwRVy-LV(`uz=J~i6>;jgmDUXrf~pmX45i7L6G8nJdT^!lXlElpc6+ST#AGa6F%0dG z5&B_@Eo5pF?vE{CnCfS)^L~511oX}QJ!K$-X$;hN!l^>1l>P4!pM_=O6yC;!olNLR zEMlzXZ-f@@pgpA^Zs0hWiTD5&c)MeN#BMd67lFz!kEnP4vgUq9A?+9il$RfMiS+&L z-jzjAP0vmL0>$;`)nTNJ!);5JW-ilD#oe@m&vP^9S`pq0gH9zb^lDccZKpO_E?NLG zE-T~Vj;(#BDWfx*31g-is7N@hz^B>q017=yV1UkcHvhCZrQ7?sX&ufVUvvaPVBwM9 zaUfWznG52-K%bSOcOtE-6h=O*r^P1;S3;3(rcn2l`F{r|Ps^HW&G_EXz*71`el{M` zF+|}6%&7@94^;{^`7QMLcLG*y!;w&)kJ0aS2B20E+z;jVKrli0wDnG}H24mX{iFf$ zqPCw8E(SYP!trR%F@KzAz<0)ir{bAGIWJtzoy{+|#IM7L2jU^QrF-5)!XVBE;u}ww zOrVgl;}?IF;NpU4O=%tIg6Wy0^0r5*SURIv-b{KbHXWQf>flF-PpO5{fzN)lP=s49 
z10qYjHvfKmsm1sf>d$Z<;$Zxk#3ix$++Or)T6YvfGEv}lwRgA~awiB`I%i+kHRl1l zemFv!p&v@Z*Sb|u2H0Qas!|dYIw@FYWOzMF0umJs5J;FW@>$_+!Bfm*v$O_n`m!5H zax;cIL;7ofS|-5)fK)18=}cl{L@hb@mYIh-&prOl@Q8FG4jL|1qw2C6--S7KhD;;# zbb7iwRj_W_8XdPoR%IPwr+=3ms8DY#qTA7^fEZF1!NEZwmQ?odY3Z1SyagpHL@L{# zv)03d-r`#kBBkfeZgR&XDYYiQ+lBSooq&sV(3EQ|Hp4oAAODwPA7TRW9Y5zr3fyI< zyY;~~RQXEJddnH{_?LXhCHlPAs$bzhU~>e*?|iuv9_8I(o6%fE%7XNqiM&s;%&!KJ zE-sLoT^DJV;0KYJmlMh(ho+qCq^g~Z2MR>Whp;oECeL^&|5R=qeEK=1C)h4eQEcrb zyR^mMe+8#{_3tW^g>)S8p=5-9P?w|lU6gZPW#1FyIWt#;MMIYXy1Id|iT&FNZf69zup6(tW(Of3w7;AVN@0c^!*5s{G3*--#)Jwyv-x4Zs zhCF>%^1QIrrTl~O0#U}Jv0@nxNp&A{hNc5@`cuDEId9lL)J}ilG>amz38mbW2JUl? z`gnU2K=bs4X$l|h{(e=_Ef0BXu84Ef?46c}imG|L2H%uGX<;)b!}>9vSiA?QoO2v+ z_XbD0gE_Pk@LSp@8{#khe|PLYhXi`ftUcY5YSrV5YCs-GgjBN~CpKvR3Rx-oJF?#0 zmn5M-Z*G+Y^;p?}I_+D56zd=lP-S2G1$u+HFx;ZAmJE~Rb6d+Abt2V`ZzL&U0wpH? z%i2=WubbI77FB_g*@1C#DZvIFsIPqQk}Z6?x*60N+4QuXbx zYMBkBfmRbRxtl2V{Ed0QfYiaP8N8=s$2kLry;!O=65m++hnIy6MO8RB!XA2@+F(^c zws{fkycb8Kd@V%Tu1N0JVSI`f$?2~RqWWTlx}Y@#>d>SrfI`&4SZdI!HFrxw^{BjQ zY8NR4XMseuPzN46nHkorwX7Vin$tAN%z5@*3%>%SjsA0@kugo`E<=6k9Na;*fl1ZCk zmTE2X&xk~R-4JUwhb5x^m5Txv&vWJabl`R{vR>cY8D))qf5BWJJ4%0=gu`_N z^5ULv3hN{{!Wuoqdp_)rX&ke_C45En6lm@`a*tutTf3;rvEDEz5?(_gy}7lyXU7V+69u%t2WGBFFxN09;B_8mytbaNuxrS z5vZ6K@T*DfSszJFkJAoxy_WrW$_0nnoi1>v{rIj?pmnuo1lk7xP*I8`>>`1Pm%v3% z8rc3_b6RT0JvOM@#IfvVn~th^*QnbijF6u6&%_M(Ck}Nk?(N(Osw~vzuO)U28<1;K zC5_|bhn-3HmrJ^41lpZ~ybkV7Z8cxU&0joH*e5nb(c7V(>i?>nWnJ?vLjwRx3=mkNxRNL5&m4Zc5ZU^dtZ#vX&d z|6W(267%@lO8nif3FEL@p>BV^3_FDq`p0US8|P@sDLs5Y%en56-4Dg3|LDtw)COP@ zmJT5MBPO%OEeuiRG`Q%QDTUk7o&e)R4J7xLQ8>*63d^VH;L6H>Odr|jV04klgp#}h z7Z<(qHKDBrOmM3=ziLjr2_CTJ=Ea$*3rW?EF!KH&j6dL?lN^{$ArLv`?5hYzqnuyy zyRIbDxOdxK_;}M=oL`QBY?;dvcACAE<^Ku0r75l zXdwB%MpR=zVz;7HC9VlQ#Q~YnkXTpV&i}+k{GErzp+~X0(g_3N6Tv+vPGzqQPW3Y| z`VIl;EjGm;3l#=hXh^;QJRWHtbdhWxP8S$nv}(ZF7HY+V zMK)=}6>Jz!E9k6(_WX)7W@{~tl*d97%sGF0=%sZoeZOA}(UtGJp(MfjLIlp2a@RNH zq^v|{W{$CcVKogWj5DbUK&1f}{V530_&5m9GW9+KW{`YB7<+Hk89a&m<+|zfyKos* z1-B=#0;Ur$ihjV2vE|&C(gxHgJQ@UN*QLjb8VQMAWY|JS%)o>*_Lsx&HODXi96;4L z@Sq4jS0M93!NDpuGJEyUH;BtF=p26CgFd*$xGB*Cs3<~TvchL<g()zV2soy1+SPTj&kY&=-1#tPCZnhg&A6HXh%&ywQ zlZT8QT=xi~CEN&+E!&gIo~MlXHhR-snm5Ead)}s(SGQ%99+`Z z?h-(QuriJZw6KL4_2Zdx|5O|iILxY!6A{gc&*87r#d@U`uo4J(G^k-k7DA({0Lj{- zSzaW`k0z>=j?8gJuJY)HJl9nNgJ1V%Zx|1eDR`f6&|~- z7+-7$v_&K>dFHNM>3Sv+vZ$Ta8Ek#xc3ul0gKq6At&&cOX77OHI{V>+|c)H9K1u>eU9$v}Z>i14A9#nZCaHbtAF(d|EWdu&&g| z#&mW2G$mv>(f!^{{IM9T*@wDk7opwj^_2UzYO$*gcObr|0n2ip?wFO?LRcx4eW(g#q-L>`^;1s!i@B=HrndEuIg`Hb$OX8w0f$&Mn z2ZrOeV&G9O#=;o);Y@Wusg{NJN=qPN5zhbpm(54Z>HU^Q?_vsuE|%Nbe>#i(LFBEY zhU~7)Q*TK(&UY1_s`_VC!B1N`u4|@_? 
z;G0>XQyVkB#%gnu@d|>TBY=JVY5SX3E3RQo^-8L?9qW$=QaZe4MLs``fIFr;+#wb) z4en3T2zuZTm3$5@j9`dBtBCp)U^?W-8iS{_4!`|wLNWl(__LYJ8Io$@3ZLUZW}#1m z7-#fd{Y)Ps4b!|ASRw6#J6LId00Q;r?c_$wu;puB|3-={E z%^Y>q46cG22vXO-2rm(mEa%umk1H{amuEi3q-)g>d{BX;e-_;lYXB{F!^8MyBVcyk zzI;=MMP!-yRi7ZF89m(>4RXCJk#o3i9eFTqJR$MPyO7bSm;rob-bGHiwYX5IOhsVD zo9B6Q`v(nn;JAsiTpzJm{?Ki&l{I#SxgvZ!0f=;(2xz{X$@7pLBAk$iH-)$nBFp6w z9v~SIwLB@9brvl9oA(d;!Rf4J)OeDF8AUl9P}H}BrR4^laPOz8SuD>yG+Uf99m-YY zt=GA?8vOO5^qX0LkT912T~%tktCs;NPtt=lnK9KKpOU!De65}w zz4tq{RV*8+WQlAheZ{KW$E|R$+6|_*ONW$p3PDwhg%0@K4x)Cm0|hF#LlPI1^@AI; zKa6*%mUsSj*~e`43+D8kb;eXyL_Krc)G+HVS}5Hl4j9bFy?rCHw!oE!@MN z;{zAaFpqcFDSyU2MFqFk{|*cBFKe0KxFc(DIDKaVyW$lOBjgDsJ?Z`V@!7iYw!Zg~ zT3jVL7SIFQoED{gdWo}mB~g$ttcOM!ANNjmVjI}jLLhOqO5svvfbyYuucn7TBJzd$ zP+9o&x>0?sHkCTyXg6V*_Q(9+=euEHH~}I_Ud>W--o_#vkgg2uCw-KJ7^zl9qq5(B zoFW{a0D$yRldU`EMok>Pp8dDm=fhA^w1Y9Y7IYRkMK*!gd!vK(_IAShK;vQiU;!ND z2EGi#qu)s#y>)}y|J@=m{eFj(nZ(5_%XC@_e}x=`hLB#7ro`_0OlCszLGve&&hVY_ z)T#q2V~3>~TXSX!yS9E0funH*9rnR$1cpSPDE5gB3TRXd6oiAq4VgGdMi6~H0BESz zP4M9LJD^)s4Q=XI322ppa6-SF=;hV@at49%aA&Doe84Us$~u$OPCZDDcBK=EMxi=k zyI4z>ZcliryXrG7eNz14TJKxk04e)lRk?sc-+?JAVj&DQVqUKt%wpek7bUt|Tg3i3 zmuI+gj61|gRFgk!G6hGy+%D!b1cyX9gesV_xUnLOhCDE*NaPP`2{St!_uRwVI(Z(5 zJXtu4#p;YFNdvd}>26Z7Hic3CC;@T(e<_BTWF)H7&lmhV9l!J-(&u@k^)Kwi1${V^ zp1F8cDaxbL>QuqOhiyPN3(x#ylgO z`FfrTOsUOk=c&BbWzZQ2@w$KyIAz@Yk4TR8+o{q?df?>>ok$LVN7h10Zc+4kS$})Y zryBZsg_;*?J>dbmMych8m+tY0HfVNb)D9DbOhjG- zdsCgn+7Tc~7Cv1LoYLfzd)KDAvDC(#=~nHW`BhoY&%vHiKa%x9CEa~=OyDt*IGEu* zsr-<0{^02Z)ANY+X=%O$qeUBg~rSl*dBkKJ5X+R;4=U!l&9!;csASL{9?Kc<_f z@CtSJU!a|@{5zvCo=sLfHqg>|<)8^#%7|srG~Z`xB5J#9!6Z~tl6R>@Lw^N=&j(&8 zTaK+(-8|fO=LOGKgI*DeS9isSa6&{_94V+%Vlyvx4y&3EC(-PeLO0^uLJ^)UMuQGC z*Df55?wbUcuLaH~_sSWA8?iwSuZo_cvy{Q+-tzMpn`8fX`cNs;(G+}&p>$3u2F4Lo zB52rH>Gd{1;5;=ZF3PkL zPFjJu(Shw9Pwgq#-Q6h~eb!>2Y$6^Ve?QIEK{-JM*)^1_wLve;C=Y9KJHfb6HePJ-5kx&n~2?H*dCs)He1|Cv+B7Hhb$1iuLIwIBQnVF zVRSgSASBqy|F$;^B!J7WvU~W{5^VyOsc;k7KDm#Xgiq7>+k!|qHrtT4#yv81dlFcR zn)N}Lg%)5T7!QDjgEnOfo@JUKq-2qbZ?=`~PXTD?>_I%PhQ}EsjDU5Q~~5BamW8>A2XncSetGV9T6NG~?d&gNHw|nq5cBB=3QmmT3T}K$-Dy=cygUBe@U9t5HaK@LU76ZBc6WwT z=d+=1t0AL%J6(3(aGGiAWkgEw?gWrMXGKSCe0*=dLX=iE&Q5S%7lGH!?HQQV8bWI> zH?lNWKdmBGPrM>ji&%GZGZ`B*5VkFsVZP`+8K6m2!v1;U(rt3+5+va)eIF%g5PV{3 z4})!8m??rV{fd|j=(Y~eji;o-&yKJ+-;Oh7eU(Y&B1H3<`nbNFP^LR`J#5wpr$h~4 z6<`taI>|)!Pkn09diYa=P-=#JbDwg-hcDidRyhE)+M8ib18T_{OXQxduakH=>Gw6q z88m!1oA2FbZPCft193F5+rx?DUn5a9`@}q?2GiQocGkgSN`3hWCDH2?Y+Ne5Lx0C` zl4O5XN%+QpX`>2sdtD@$_ZRS#+1)yH5?>fCcuEqG@+UyI{TQtu{QGKFUEeI9kMtWF zKUBYcUk`45|A?bxPu5}yAU*sqC}69$Y%PtIIF&An{%-A+{LnPYVnQbL15OMKETHp` zF)ly{ogeHXtI3qBj3vLGS%aWh_P^H!WpLTP2Ks;7$FOdjkbGkh^AOSWpJ)xq1veP``gJ&!S4U@7y5Nf>gbLvG#uH2(UP0oUWsM^A%k9?6Ry)>36E$U*`W&I{#U!6zj-1^eu|iO$?#V?lHSY*)wl)v*Z8YgN&Kwr^ikkINPlGH z2qIr;3$55fno6hn-ly|ws>ndpPWkk?2sJ(0xP!F|yxNH_(ug1uf+&*Ig;+36$&$IZ zxLf?HP~ZYwR@sGv`EK7lL2_pe)Q!WY@$cz?#Hgs4s}%8yX0?T_6ptig=HXp_aXhSU zx(rVR*qj#xGqA$hths7@uEp!b(|L5cloQP=PkS;fT*9azKKLV}Wn(~%dV`76Ah=t< z)qORn$5j|hoPn@4FYb<^A=q7q)dVInBj#w2l*&7KVyle*J6johDs&=S6 zH4sGMJ~?718j#iGHOGFjy}<4iW^lu1gsi^4v-FUHF>i|QzH-cLC%zb%oGP!uIb?lG zM44^|)hug!A+?^-UkYotv+c<3{Vx>W2alXY&?EsL+iqXC-($m0gQed@;h`Mx8p);c zQ#hKhljII3#MFb<66&&bq>>-7UlyK!D*srwM7Cb~#K)x>jO=8i-DvrSC^Sd#ITmX zePgCmx5F-XhNazezres10L7AlWL0}1VoS=b zVwdx*emPl6)WdO9q>L84;AWEdboLRWqiLrJz3LC6ja{lT_qkDk(KN|wwittgw+Ow?@7W6N!cYo zt_GuiWZAwP`n>STS-NtFO#{#v%LeYC;}<@cisjgNV}(ki^MJl$IJnR%R0C$%hZJyN zKBb>6;*@Y#v2uQ7LfN<0-yx`!qWAr<`FDVBa!Cs!rzWSyoBv4l_0+wmeuuCg%|LV%H84|xN6*)5 z*Oem!FM`=zHooCn#CRK@20X_gWg?~zgR~E#(oW{)BbdVaEx)UPP(cp&ptrn6nj0}| 
z`bGJOZV!3h`p43m&TH$QAi9fAnsOXm$)6)M4VAP1tXqSqzLy$UHUOKi64YEe~=3OgUw@ z*1235A~9zsH+q>uw&*n4JrXWlydcx)ShuI=7-b+R~evAhy~g66M$Fga6@? zC{?I2EM6Sh!nN64K`__6DKu8`N(41&19Ucb^eWq}zNVFHqDJ*A&ao`Gyfn(0$;Okn zryM&yo-)`nn4Q7VuR978$PaZdhuTs~%}$n!^(O7d+8!YInGu?=&1|tj?gHCuRQp;L zza3Lxv=1F+IyQdx8?*Ja4lI$J`WaPra8I;5>ZNGlTRGPPGCUAvky7M}SbAb4Bwi8$ zre|1)QxklD5*mY*t-)E-yR{sAE65y~W=KN`j|&<)=#qux>aW%G#(~Bt`~Zs5fp?lK z#)c_!A~tTC8{Q~&ePlHWLT!=>{KHKx<+H+hqYpLGARlIjyE=P3gQk=V&L-7u}b2Aq-987%y{Q zOzId>6nrk?cWE8tksG2%|B!G^POivQ2paE=Or>33BiVJmqPc~kMn zi8D;7O0)z0E@sE1GC*n!7F7?PMN5mF=R)SQm}yHeoMVUJ9I#uIRq%c8?m#@0cJRG- z{?gA(vn5L=$kPI*mlT|o*$I1~?nqa~9wzU0!JiN!$j3&jQt+1wZ~Of2$#+%LVM&n@4-*DJJEq zdO;VBj^hn)@W_bqU5@sW{TI>obR-Weuw-hkqgVQAL(yW`#+?%pQwMn1Bq6Ln&o56N z1)mpFm)$js!;+lz?oTcrWcNwBAfIxbH-@<{OV+AP!qPvGTB`h3L~sbKkCIO}ds&cP zhB6uQ9NwP98r?Z1vKTDmE*n&lNVfK5mg(;9p&k=`UyUK-hAG3CN8n)Ob6GPA%lkjXHNvEg`9&TPMXSiQ43;0~fVbcJ@cgPd8mWvJBedX;UXbW|LdZl-B8 zwcLu?+)A_Xb-mSI@tjp)`6EmQqUFj84!VFy*3qSiLJu+dC4SH4+&DS^F2~gH-mOmb z1OmjR{gvvYGxA$%mGM6>GnTrXnluGfjy4aijz&}exvrd zuLw)SudPovmC>GmLl*%csxUJp!%%*7?qIC}D5|cF{XM8-i}w9FLLlhB*EhsqUT3Sj zK_X-T@~vUrpN**FjZg<= z{87E86YR#kC-OvIaDUiIn1G*PpQM{-HSEsbzNrZ;Io0Ue24?aTrinRuI6PG&^-AVn z2#6pWg1>BuFMf5uJ#K0ZuhP5s9JSTK?%Qwtrh{>tt2<=fwOSTy!XhRBeu%{$xY4zi zj@Icl-cX#sk5rt zLw3hHzkWV!*t88}&A~P8&>q=-!VBiQg|YYgvQP_T&6r9ZHtm>1{|`Mh{{Wp;@AO1j zuIK5#axP4Ye;cu3|LsQWZIyCBK41ycNjb=uOWTf+{XGzQ>{#^vKi2)LR_}_fkH`N%N@}i&r-&!YNj+k;fvFJzm%c+gNWO1C&5Cq0#cYKxh zWm;8YjGCqA1@RbXK|-j{CHH}jLaOQJvkF3hJRvBn=psHa|GJ2aCbunhvG33CXM71X zj{WbD6Xx27%J}xbV`k)Wb0O|Q19=_(34T883B`kp9$4hgBL1C}X93P?6LKv(yw3f6 zcIOB0n#p|A&H~-w$A4s)ri8Fr9u-pu%JJOrIw9h{>x=tzf%G~o_-`jILPh>l!VV%u z^m)|JvJB;j^F`3gR|NCu!NbnP-<pYzNjJUc;x3vYs*OkiZ$+ zJPuL*0hpSmp!gJ)-UjQ{3h97{;D;}6cg1hulfze!?O1X7t*L4YJEw7~`^I6ad&3=P zHihqpy1Y(@q@ND!l5lzX=$2p1Ncm4snM<=4nMgb@a-6JK_j9IMneM2#i9Mt$Mb=|X z2e+{>#tC$aN zg`fLa(>yc!U-lDWUp{( ze_}|{!!g=2m&7#Q7iz)n+=J9t4oJeG`9Sv{035S9YR1bgF2p~cR?FHWgk{2InV+$ ztPAepgVUy2t`Y)DM9%;)nl*cL~&N3ML#!p4v z(9A&1_bHAHnokYMJwvVE0FZzzITi$i6#lWUHN!^oCH&O{K7|?eo(20K%RS zDU8_xuw2sq)l>6p^k`fBXDw>`_TwCI98dFidXpK6DMJnPRqva@sh!Ao9B$?6f6 z4tuiQ96kLGr<aJ-^si#s~wdll8s6@+uJY7y_vk*VIF3uH)uq_E`kT0fq92_t6|x zjVo@$t|{)Ro}nH_QMT4fD4>@OqxEwx8CzdpCi11zO|SU0M_&339kP3X2k?!Kx8^C+ zl|Add-j`v0B_n8sM*zdEN4^6(-tzI}ORI@r!uh*fd?!D?Bns~~Y}G{=6I?QU&q6mP zGNw*T07M-?$Ne^9CUrstMFx38R^CWVV6|W6WBvh+r@L_T zBN05{xu=|vemGr{$LlJ0BF2e$NcmYjRSK0r$-74tdHyB8PSFk=Pc{c@vNnHtAw4^y z(&l38jF4Pq;&L;|`g5hBJYjF)vxu_b>xuh)wW{QN?(}`f{(}$UXqx;%38)nHFJFgs zC-_r-4-Uh4$FR(Y(?CSYqNNYTm@4SS)^y#x#(>8n;AfeQ=x;s=Y>0l*nIlP;x;L^Y zmhNcFo!4gQpr9iI)f(>VW&HpR9}LLs9P#@ERwCq#Y#gftjuYYS3x6iHxvC6*lYyx0 zhx)1HnhV_L9lFVnobNQ3aK~|26u(Ou#K-RGX{YHIEa|3(d7FR>NqTLutz%P~0%A1}UP;Bt7Bh z_XVq8za~5;4R@?2%1{?B)N7fg6eH~)@TD+!sPBoLN~V|*_PseB{ZIWzx=q%_JKQ#> zqPK4u`ZgLV26fF7Oc7g7B>*x%KDO;i??oS*4p)UfgBo!^tf`*>Kz*S{KxJU7b2;7rv(gdB!b(M z$y4tu#o$ph=vfDkGiXWXqF6dWr$*$*sTZo9NLcMaNDa{rR9`zlTRa%0manx2uPPt= zOERq|lg*OvTf?hvxC@GFz_&6vl7qf3KLF}4;2va}0-SP%OOUA4L+bzAoZ&(lAjC~} z%ln;>r`u!Y%mC%TVXAI7zqCOtmOnKlZeC1zE#Ve^`KuGy16F+tWTBvPh43TKwI!=$ z3i7o>#8I76Yx9x|CEJ;#$R>S{Sv+!`JC=#qf_v570z0SYdI)Eq>+)0%FO_i?)EjU` zc_%?B$DL?*IvPo7zjC#aJ^y){gZvhi3Jb55F}b5om2G#JRNW0b{;_a6J~R?{D0EvB zY?oPZv6s}(W}rxLPrHaZGaD(On~@)}P*?tU(%7W&8(bU(3?TW61B6xz4A4{q7xQfY z^T06xbZc&HfC_fy?+;B-M_hQ$l0<8FaVnK#vst64426;Gsa5pQa(sGIIQBfg+6j!;3FEE4qCIRM9wy>g($J|DrZ8O|w0zx$U+l!U8xm_T{xa z|K|KTrs&A20D85Dom(t^x8aIxg?*w@4M#dXQ5Uez$IbMrPJ@H;8@q`T(0x8c4&R}w zyv@EKnhAk^>&vnMbgC}0?}H?!*L10-;y976iV-46?*8;|QM3jO&Ci;*I_yp87eWHl zrum;&N3|%a#KsEH;*zvtAFtr}s)ax?Z-kaK&S% zwZAt9lw^{C^Z6%l 
z8G$TLL`#8$4`%+XIT|G@RLe`+u`Xk2<%jq*0stwWqpj50r>TYx)~c)ObB$ku#vcDN5o>T!V; zTi!ANIonhm&({`#ZwGUvBq}&{Rbsb*gm?Pl6V@P}?da@+UIME84GVb_*}a}ED9r?u zkc#p|I@LnjXk%E*`r?R~`dM8P16EulP^f9)4wqJT{BG~#L)uRVnV=|VeIuHyh?g5806m%yVK@Kg@sECLS5tV8 zM|-%?5bGGG(`36TAwQk=OKpm&xT>3o_>kf&cI>!hNe^3$2<0iLAWRij#Wk<*F{;j>5@LFp?jnKt;YII9*N7ZHs4x#`g%d-w zz?hz01R#!Hc9q27>mKCGOD6B4j}yV4kf}^Ta~_U-R``9Z@37st$Ow}~F0`D*V(6D8 zbasW;s(%Ff)ozgWTz5_;!6Gf)3bPAEhy_nS0J5kS5*n2(e?9U!(sj{^9o~@82f=Rb*NjfKvLqWL&L(^h$bXleO z0S&yg117=PpVUQ|DwZkc{M$>-2$yHXpDPWz{dg!D#Y8f)O%W7h1H0onFz8#1j~nWO zyA0>a51(FR8ne75zd4wZ{TUy{9IQwiis}d@Q_}$GShvqEt47nFwV#AzP!5Ul-<`+M zPPbxFo|<8rwABLQ;BO9oyO_ofs?&UEla7q$%pcc|sQ6JJ5^R?-+nVf<`BIdx z>d*L*>zXlhwmse90rSZR{Z2oiK_{cn#VS)Z8OpK97}XSFYakhQrRl`xP{Ra5pBHp$9k?3{PU++HKz>E?`8Fl<1` z#DlP2j9I9El+4B4^pvjl*_gF)VZHc-ZgzeU#)w|blZ-x?K$Kgcku)ZSjZn%nfja)= zC{H>e)N}#%@qY$1wPi@*0Whw6Il)UZNdcXtqi6dg%a_$u1}cic0HffK4$Fj?8>ol0&&VZmEQM8+;j;ZNE?FWwXLVjhL5FL62vx9}A@!iGuD>AN5ibLH^D$IA zwr&X2DsaqQbKQO#I0w8Pd%mV0{<+?n*A$_%Vlx*O*cz;tL_+bSn)CHl%X7GU`@h$ti@tm!5FV&}~4ZFLD6YGo|> zESQB(8=6-(bQxqyfc62%cbGj!sw#@aTy$3c(Kr0}5?TEzEBUbir<)UaT7|X1w0@-08(V{5R zv}exp1YzZGoBjQ+?$3qWF{9cnXP$n3;{|xWBC`Q@jjl&RQ$a$CTsN;+l2exx{sknw zLpk0k6AJ*9je;`9zOrp*{;$9(L4jV0t`k-E=yyS&Np{byIVb*}JFiu0lvp4WZFJG6 zX`H_+=xK5|_Bv7s_sXPJ91}I>t11uB-Sa&cZ!UI61WimKopwqu@@G$$@N@{iR~=t_ z6`u;+v-RROgW2Nf^x@4p+A^m1^(Fi&-pOuXsDmG~EL93*J^&gf?=8*Gon8Mh9K8V9 zf^viQW`vNIJ!eS*J^v@J&HVoTBvy7fvOTOHUw-4|0nG*0~wS}zEQ~5xM5f^ zZ}}95PRuc+mJwN8vy=WfJl!l>51zoVr-&fL$3(^4thj8xmp5ZOj4$i@ts|NEO*$m4 zOSx9?#OO!^jHcY%t|B9EWpt_s?4?LIiP&4cRDQ{Z#7a{4+45xnXhC|KG!AE?ys=M` zp>Y?2j!P-C<@up1TG|hjl6FpKwQ&Vf%Bjnp**{-Sv{TQI!Oq0r$tb_LjXU^b_a8OzEXX>rP##T$W0%H#N(8b^sKBxP%y zJ;4L<7UOGC$`xX@EoJ zCqS_0g82c%=xMw%0g?vwDXYOe!fZtOmBn`FgG^E(I zw<&dq-J#$?kc7T5dYGa+5O$h(Or7}|#shp9U<{MP!jtDMnjY*-16Et<9&35O|3Wb{ z&|~Y@GA_34e%emsetR+l=QUNh+}l3&m0R}Q!_Q&hG5v-mPbd>sj(uhHN$jexEp2^)w(rf?5F6gvtQCw)&QG>vd+B_mp7sW?L( z^BrV1^E)w$yc_8jutdRO)QZ{Lj8*ovT$%gs-U79q`m4-#yU@ylt_8@)RG)lIGU%Mt-1s@fsC7~(J9i$` zvJD4isJ6>n1%aB8*8;&|p=z3rynOPc46xvvi$!d~BsX=|j~fXSO35kC*5O?{V7x2o zfW^s>!@i|Ov2=~-fKjM<3xCgI$gwt9xNt)Nh*|&Zz^f&EY(EfUggktIlYNBX1O0e7 zK1FM=cvmD8*5+&+0!)yBM!TD%e3(|JS365ncpwQ_`U*2ReT#Agh73nQT`WSH89kBO zx|2d(?;|};!wbz^BHN#rPwC=Tm9)VPmWkZ%x5J*gc2dMM(fkEqdPMidl5gts#Sq82 z>434K%>n6QgjS0Uy{L`8>k|I+yUhz=iA*?w-zP3PVO z<>_mYJQ;<0=I=FA^BB1{ z#Q$U8p;n^u;3`A_8>oTpDHR;^uh>!q#~{_ZN9!V5nF$hz2V0?~Hm)*tlVEmKhBaoA zDNHDsV&hX6GF8laz;SA+7K!*l58#5CPZ!5lw7y}=Zx%mkEcXYcl|QCRxX0qHO`9`^ z$Bgk~0=FJ|u@v__l-Rej{MuN7AS0rN&_`r(QALp;iNt7j#nvNy7=vaYe_`CI!MzV3 zI!iK=fGY{O!}_`3yBjQstwE-LPBqKw4kj=N!P=B6IN|w^L(ALj<^iefX;)5VH!iXJ z?Y|hhlusb;*f39HnY<=%Qwn_4VC16nDDhahP3}HkFpydG zz;()s=nH6VrYzw?s_}Z^B3RlL^7y-3jE6#p@%D3iE#6)?n|gcm!*6frxeCx~i`pu^ z1q_}Tv!|vr>vw<&*87>;55KCFSQvigwd)MfIT!gu5i~sHGG-jjPtC9Hq7$MXvCl%C z!mUj`fAL66H_Md=H#CE%Ql0)66NaGil_Ai3nCd;kOw6+I;#O=CNnEM$BUo|oN9sc# zs_SbBMp{I_3x`4pz)hSy{>mZ~QXSE?NygUPhlvl|?@RpE4Nw2X>gN-QR$UY-^br$s z-(L=$q!v-`$%W`;Cy;z64t0<6E(34}o*zj0MsM1o|H!`1a3B@~rgu58Z$A+lLDa%g z8Nr+9XM}Ns@|ED*qP7@>g|3hcME-EqE09CP}_eK>h{3__PvS`p##IVui!6w(|ZZW3?a4UQD*>=c~>0$}$_Y zar?L;gudO*li7)5@I{?0&>43=sd#;{naIoaBg)5Y`=-2U1VitU$}aV?nHKwI`YO(wDFkDud&RBKTt6-z$#OBj}aK3D_+7M2=}o8`SZ5Iu)=cs}@d z%cK(v2U@WLV~zv{iwFmzh36PCNC$8BwG#-#oG+SQ#OtL__`Fgz4J$GvwBiqf$43#T zkIF+!g)U6ia~t%G1I0v$YcwR-yry2bAM&yh$}NEU@gS6PTdIouCK?vogwsb|gip#W zTi>=uQdJe(^6X3_4!iROU;C9#SaCY?khw?x0lai`97jCkOISbZ9Y%LGN=5sK%XBx1 zc6;K}r4yY(1rF;K9V}roR+m4;bD%3^9LD0+MG!$3lDLS?7WaTtm5Dy0c%fD83oxjg zgLAaG?frrf^1OsgizohFhu2gffEAvkgH;6Ic&G7Y{f@Q*a`_e}ktMdyFN0{WI2wOw 
zFcmi1v6GVUl8KWoD{VX4bXu%m(nl&k+3?mBTXPrGxu7q@_%wPK)7!heyi4T*`vZJj zE{S?n5`7Z$8>oF#n?(ys=h&S&Q7kDD=T>$+U-l9WNdgME`t6ZtbW}S@ z*Mv6)NY_%Ysp~4ZqpTYM!;GP3nM!ssN5m~)rf81u3uJg%%ve19d7Ci$P6zv#AO0oz zgI}TDXtiMY9$|jV>`hYJaFj(vV+7qg*40v_EHF^)qq1=C5h(lGZvyCvzHA zU~7Kzs*Lw1iVi%cj)Q1hl!bN$DsFp4x3uJoMa}G+>8l$+Q?2jD|Y2X5Yx(vxN z?JOMh5!I>j#N68#%CLN|IIj@eHNX@qjL^0(V)M-p4JjU&2+yoHD2@0v`F;G5k5hp@ zq!q9?k}cRteQ8UWM0n816!IK`30;AagV5YTOT~sa%7dscw}p!r9Qyyf05Ap0BaxFw z^M3-x_tJV!bN}vGW;_t1A^W#b{xd#e98gpza=2!6*LS`=$iA|tfPcOT=OzL3n+Hh8 z-yhFN-kpB1GD$HV01i8xB#+h9m1qHL8-2LNhqlcxvSS4Nsc5K5`_+G-19D32KVf%z zqKeU#TGhD~(JSO9Iefi>VQwx?**uQ)FwO9ZIm{oBmQ%<$M0#^x?9l%NqP;fLjNucaBasori8-awspHkGiC0z<(ztpdcqFulocIlC zktok6_WQvjy>vp0jgCDJuKpXM6TaE6Fte%yi1mTohuYwQ6vsNzAas%wxgh{=1WfJ# zGW6`Yjau=49QG1iXptEJCH(UMr(CRaDf0?i07w=)z~a*R_U=jXKl(`qlP} zWKgP8i*{LG6vn#^y|#y=!No5R@dUc>G*Jbu_K^Png6@m0QZxuShw-9xf2tN5G)wE zp@n4Gj9;=b{)k0FO)AvwNUYfEG+e@Lw=9#m2{n`GGJ{*MN15+3)cbG;(1$VorpOEO zrD(~>a}$I^#qkUbjD7L``F^4gK@9W#hg2Mri#ep<2ag3(qNBrzNzgiZ^%yrMwkEM| zT{#(>A75OwH}zt_T}fr{v_~_ZvM=%^kH%r3zeOizjHi~X!q(vD6GKb;BF*z2#r0i@ z(y4)~yL;Tv*>1x_VJ@u@owj2#1P)8;0l_a~zZ<^vH2isWU_(qT&u%og8)d^Na;s&tOraLg!OPTbqQp>4#dvw&_A)oTcbR1%w<_*sCf|csr*pa5&t_D zi&@6YiNGSxtZKkaY%bxamUy0)CPw^ZZPM9nC_8k5SQPF0T-MqzrrkC|CY`1UNWiMsV|n0tBqgzkM82R*G3CWt zCkmQp=LjlH9mNB#OLD~G;1nu$#KZ<@7pYZVAEkyR-zmf$Ph#!<2w2x7;2gxyA+Gs^ zc}NuNyNENU>Wt1lrG4k@HX?Ic^6sDwVlBDlCKH(d$*66;k3vZc!{*fZ%GxFHdIRGO z5&I{c%MM2W`kutjXu4@<6aDpxfTBosj2CC)CCq|RyW41|kZoTu zC_K77h{^RejiR5Az5rL9Bh%h0_9pCi9yECFX1P~|_#QQig3*`oMN*fAv%eTigmoNB z;+G3F)i<X`>BjMMUs>Nbk^Jc6Kax1 zxeCLl?GMzTc3Ph#zo-{r_9xAT{v|dn!UaMlmiZ@f$^N>Npdh71TQY2}g5v%3Uy$Yg z+6~c`Yo-|CHsj|+HM=#|0fAT7g`OqZ=TeRGA6SXBt~&n& zFEFkD1tMoQzIMMel74=;j&$&igR^xpdB=_oJ*JIINv??tf;$%pOn&c7;;h*7z4^Lq zX%X}MRYe@?&pYE<6;)K(jr4G*h8@8fA_k5Lw>*~4nIrcDi7i_}MBfqi;F!0f7xMld)$X!;% z+-{D{*T=}PL{>yuqq*b*oKeATNHRySfSdEtqra(0fV0dN7c_Gw?a_Hd*KNSrJmIYj z*njwSm=p#iukU{R870pXu+9gc9o+E~i7;f{#sC7rMNBqqK|YJA-JVA(LwT}=i>I^l zHEjX!m!~QkorlYxf0@^~Gucg}3btQe;p~uo_pD_VmIsOsF+w!qT;R_hB5qwD#Ps-J zh<$r}p^RM~jHBw*zQ|-&tIVnxi<0vg7&LyL6s++YvR1hdh?%R&C&IsN7{J36mGFx7 zTSv*dov^S)+Z>ZZU&iy%cbrlG?gBrFY4?>el<-eKPge3{4ZC#&6&^#r=}}nu?q$WDpoUmCqTHtY=y}RawhKX$pSuvbDp+# z+q>qb{nONS!kElO8OXq2c=-{vo;UP@Q6swvqYI z7$@xESo?h-52H>Pxkl$xa(j6SPbmM&@SPKu=<50Q)a|{y_GA>z{u&Ya$_N?y(5k&x zqMmi6qSLbM00~3sw5Vcss=iEplH?G>Xxh+keQ%6uSlHQThQ7~rD-;Ja=*?b|W+=$^r>2 zwby!q?Zk9~deq7h!ePSufgn31syI~vmCn+{H5pH#o1PZTTn4=m($jL?RtM)RahWrMd|J`Qs_RkNT%&u?oZKuKD#YQ z0@kG@9}IVdkVFRsT_%BrAo1hbEHXZeUj8o^>u_8?dma)G^vCN%Q@zi24|?Y@UR(CP z`IPHjsXpM0fm__l0njBz$7{VqT!I&YnBW6Ohv$IkC!8q0{G7sJHbg!YVByb0p!9r5 zWm=tW`F19E2PxJtAjEJXYC;f2@Xko;n=457qYIx*^lem3OJ*rVbyOhfy?w*(4;&zu zh6yjokTz%_?dB7YR4Hpcl^HfN^V7m$_Fzr)%V-0i_)T&iRv!hvYd4mDwNalOS9CX} z#LXM9guyad4~#jO9;TWW`gyq|?Dpb74$=|tLgJu(52sZXKUXgAVHqRsvLyLL1P7m9 z&ioND%x1+2^dXkN@>>el!}xjT@B+MIAA^8{jqT_R@(Q$w11;)dk0}IX-uaPH&GLs0 zY{zrxCQi-KTNxd=vU#b;;O?b&7sIKigEv!$+a-SrAwjDm+w!cF4#YN=aMlizgth^O z(wV$@bZ2D9NSkFBqcV`bRfhy_Yo^58aiqxFAx553Fli9xHu9Oevp1}9&;J$63{YiIR z;I`RE=*j>{65Y3sg@FNOoH_K7ejyGN{(@s4<28E#x5|EfF!VXSQAU~Sz3_;A+^<<1 z@9|6u>*C;w7w;$YR$PTF#?$MAb%j`b=M}Q!)0V{s8oh}*Dfc49)bIK2v1uo{%mT?8 zGJ4>(lsbw8Cye&!wBW$M|0=|i`Voj|-;fn~z82X1XQ8G`ls}k8xX9=SC$-}O>@b-O z&Q2SFkDh!jLTBKMxj@B&Wzu<9=H%C&y55Bk+hku?*cCIQ~& z!&cBL&gI*5qXG5AKtkn7I!1EhA7!(Y1KhUfeXqid-j{+Co&ppHcnD2w!I?QOmm3%m z3pln5E5-dKt;ff!{rEZY#lsgl0kIC*TOP7RA~`V2dDrz-=lo9Veo{4y6cx8OgrW); zCe|`!@N7VP4io8A4H@t(SRjFtM6dFxXX`lIfn$zn9!ADf2{ z(LTT<0e-cO-Boi6-RAkFUEzZF?NovZg&OCd6h4z8uIz1ZgUWIy12Z|004l0j=FX?7 zTn9N1%eXd;2itqsIkjWRckeOs$KlD6XgpRRxPCd(3?5Lxv0Beu1)e_y=^X=^Z3(`x 
z2x*fjSrtf?vU|GZ+WC|x1Tj&t;KnnXCZXDqsV?E}6 zznv_P3lXR)tQ4@_5IfwgJq4VZWU61szWB8GLTLf|9$9bTrta}P7n4el!k+5i8x+5Ix3DYu7@Xs@;W+j1?Q7y}nbvj3 z{6nnKttFL4sc5I7;hW`4#FK{zCUXkVm-7__-scEBp8x z`K|8M%AvE}purT=0f_!;St8E!!H?-*Dl$vM0vqNM!y98XL&%i;3oOT}Aq;id-+n-@ z0r-QU8f(*#nHc)N#gE~JOFWL^HJT^yV>he0lzrwmiQCV1%1Ekwgn+AiPu#%VX?m)a|wkH&4&aoS_K^% zwrd79Z|rvnVyt}4Ycr#zW@*zU5*A&c{a}o&={YZY|QE^Qe7aB6oj|ALi|Y?BHB*NnE#LcaVsT3Ex&( zXkFN+V-I+5=t(@zgxBdDK2im41$9H-b1fN6z9T&*yP|vRUa7_z6D5$jz45Fat5Kv|jPN)%~k%%anCBMX^QDZj+zyEQjeD zsU4i~do}aByynk|LsU zPr?a_YlLF}K%GA44XRs{&J^kDQze=#J>r{ZpR2xW?6cdA;WZhXn&1-1(Sq3UNG;e$ z=_UlfzBwxW6>kDfxGafA@f^?+Ah>Pl5o<(s)&u82OeKbaz)2+6N(>-9_?RQ0kK1p$ zLJ{)^(Pcvq4fZI0{1uS}uY0Orc7^;50Ax~&YT8EovRYex8T{=S(Eu%#`K$+BoJ4{a zrWr!tV8t@BKNU~Y^Hzg){%OQG_|Mq@xWB7(nQkyideJ$gbq6t%ROkG#axhBpTZx~} zQl>w&f4MvD+(%{6I9Z+EdiJl-GdW3UCbJ|0{#_svOQrt zstA@-O)0m4Fxw50^eUp}lc?QBDmNv|4&cVW$MG#|lO;PkC&YTe9j_zeaj3d=iKmul ze!2SZ*45u}Q4l}imgv8CK zIQYs9)mu|Tv(k&CF!J5}GqzlgRor)O8vIh__>3lUx#F~FIqUtk&Ma0pcrpa{ztdn1 z15~0?5QagAm+k&mAR-IETw(>cvS`Wq!{5SF0|+MJu<@YzD}ho@;kV zTrIC`xZCtSJ~pVrdnu6Gkpa{W=YLS~7o=D5uDizAq9G1#v+MgG{g=Mhejfx5GE0E+ zGwQrQB|i3LGXm^JawYCES$G;*fryF!hG8vEJ%_J2J=V7LifS)~9>o8ET~> zc(Lun&cvP|)1`h_cvPu)-FKt=`a8g|!Jr!^+{kxbxvq&*k~aWy6U>7xbQ&f%TPz%a z*pEdSbXSYb~!V!aIX^&Np*o0 zXWsAouUK$_V+)2_H6tJB3v-HX#s*FkU%zl4Q;H8uE!R}T@Nj!6q99iQldK{8oGtbp z!>6j7(h9J*WJ?!o&>!gfFpeJ+07*}W$eurfZiXZT`|Bm6g zU|y7pzz;!lStX^ch|lm)Smw8LZ+@z@`lw>!Pw4z)x~}bxsgH-;sUiik0b5A+IIe!b z^&0Rafni>?44_&(3ijy_SOo@i7X8=&36V_^5+Gz07i=8je)})z)fAEC>ki+btJG95 zl#JvY)4f7-gA7TaetwhFr~tL0u3#W1vN`%T)$<>qB$d$HS^UAq@e~A{6iRrYwPl$* z;r|`4LQs`vc;g$@c%JC+^?~XW;lFkd03i#;jaMYw?9P`QUI51av?pWLey>qNEp8G! zGQ<_bst}GNdGH5?IA5&aY43jG9^?qk2a+;&Aq)7@-`In+D{^;PgW%hXiIgj>Z(?{) zQqYtTGbx&t)YhE8ef3B?b1_)!>;C`HS_g`6&|@`>&m5;T~Mz*i4N!HOS^#LnN7E zNOi^Nv1b5#KWX@duFHC0p{7lX2c#G<$>C}jwb~tC9slpG#D;^jiQ0lJ>v3s=eoQBm z6}=5OGDK#Jv`Il--$Kx3>KY4Sl$$(MLVysbx5g+ClyYvcj6GlS1sJ@JrZF$)w=XkB zh^b3lKR=<|I}aMU3YSmmsba3eI4RTBWW(Ha+}_6buu_ z4wyu)My2`a=-VneZKYb3VpnI9+T$teAUp5`kZg$|YSagLu4PKCC%d0O1pT@QhCl)7aI|0s zse0*=ai?Noqq^+2*!9{~qYl4~VoKF=Ho7PA%?$+CLSMLlIJx|coW=P>hoQ}#rfD0< zy80=o7efR@V!sFFyCe&}6pT9z5IN&XH=U8=wQk8xi7EK>2@Ic((!+QlWIFTDBH;(E zQT4he=SFhbQK5g)_=PwVEw(O~rdn-Y=Yps<-@>@ex(SO=NZN%b! zr?Rj1ENAZQscFt$U7Inv&4~2zmClh@eg05{TrYt^lSwXr4L_hMcq~y051@ND2#xuJ zTYt{8Q5?sH5DX-I(w;?3#G@>r^o1AtdizMttFzPP*;C|c^P;Fy<6+ypR=c^KnxqPT zj$Z5;xcCvsif>s4q?X-?o(Ef%DjmVeeeX33e+m?F6GYWfDRoc_#O9S%V7o9%6UMu!&cxOgYeDnUcF{?%$&Xj zoje*t5jgMIt~-#uK=cuI3zGD~MCcX%Vk-O-_^F9v+wZc};t_p0d;8wAbkOwGf`+|w zkF@+-A{xo?3*iGcKg>M}G^!<{_|Pph^Lx1~P}WPTX;!SZb^A;_G?4CcW+Vc0zB-F7 zG!(Q9ytqw{XNA$55xsktHb+2IZljH=OQpd^h*v>t5QhgpH|6sbaI(JsVpxxduof<cQJZf-RE=*r7xS$Q}(SFv|iw&4q5(nMi?LFK?4jrg=%Nn#v z0Zu5}Kv!3H-v!XN)2ao@N^cjf1y?0QlVvXM&w#?^f(ph%aAGovPL*1JXVr9pB1Rk0 zCE6WundwyQ5dHOI=6BDy_S261O#A&#`bAPuaA4pokf^EMRNJPuk*j8RJD@HI7Z1?J z_~mWFPN=;&RiX_^GM2S5Ws;My1|nv<_a*MnelwWl#U*rTU}9d(jWBO3Qc>3{8LdTs z+Yv01Pi*m+T^}2LUL#r z;u{+#6=h5%n_?2csW-vDth57oI1R;+iAEG7x}L|$GO=zWjB~BOCu0}+N5E~DFO(9b zlbRG$k$fla6T?+!fA9h3sm}cG7LY$v)j~CbM2WGI2k=O$^q-O_ckP^JQ|lSeGkIa> z8$6TwUVH4Q%sh%xj@90IeQo}3Aj-Saa{bbxD}m6^cclI@K$5;yQiNkhVnXsBzxMMk z+n(wT;+85Lc_+F@QFVfI(7VaiV~=mWKSwz=DnZ2i!@{{Yaqfs>4eBQ{dvsfgntB*g zjH-ol)i0D1C{k}uv-$A~aR3XA%j#hYY zP|L1oNF0py@NR+&%+5j8d%y2>x6WJQw>t}3^YX!D-tq*TiO1bHHpskc=t^Y{7bPt! 
zj#%46&AQ-SjrU~fO&Tw9cS3%;z8<(wgdI%Mjz~a3K8bjgpoF-He9-MBIDy z?hZKW6KcnJhIa}Pks`8I|NkJ07}}u*_#9&2X$&cWk5bP%-qRFH#h~Kc94UNKnUXTDn51R2l`2tsQ$k*49s4!=rwjIc@-P%e(i z@nq+p#cwjY#EnTSeY#Xy5oem^HWcFgM!803>Xdt+oi8oIY-uQa;r)6MbMN#U0lzo# zeRyLjN93Wy+DI3s?JeWih769jHIY=_8}7yI969N0=wwq{PYh#|!m9*OHbp0|B0@IqZWZi6_T=xsuPC_&YK} zXuVy;<>c(%^>mdi9grI?>zC+`&L4-oPElU&zX#v@-vr2cIB&4MZ){f~CO9v0I95^ac=}Opbewo? zDD)gANebh;Cm?<0)LRnhl=y;S7>uO39_oJn7j=()-IdUFBb4T+wBQZU{5Mpo$2?|W z&svu-bn3eSv-Eoc=$iRW4h(wcN(87Z#DyARGSE+KA09 z4{ym!_kAPu5&Qiqvq}Q$5i6K~H}B@cqjmq{R#UrY_wGY<7gsm0ik&H4H*3$iq=VXp zX^02zXsXnpsvcw9$8kEnhBOJ)31f%(0ja#>%>T_W z-iC7j%}D{G($X-a-AfBhw2+EQoKTJM6Q%x%8rK(g^S9a8e#sVR0w2hSZA0CUqs95u z3M&RkD${QG?g2E5AC~f4Lu)u_1|2w_jy-Qb+K!u0N+Zcih1Qr*8Lm!E)rg~%5%T_${u@)&ww!lBez38Ew0*Q#n#@t6u1Z~6Z}KerC><9RB0Bqg z-ufy0P^k7YWWYY62?4qkU~Nc@Z3LNb7&Z^hdo!WCL~>t1C_WvD0ns?y8TMnWCL;Nt ztu^d7PTtvKveAgG0syG?qkn!vk_|u8-KBq1O`PV!ia4$=xR-Ah;r&Oj*ep;+&4z@; z)S+>2ftq*HEaXz5nLsdz-iL7LY*OHslN9|ObU5-uv_2fhwA516+_l(NAPpD|+;&ZOeMu zMU+&+)R2;*=_>u>g~y>mnmHEC)kMwb;?>xmKoi3sTpeY^z8Mlw#t~Onx^_Gy0?*SJ zT2%(V@(%0@A{n+}myqO#nU~`K``rEq>|{v#cu^U9BQimt5eP{Ef7m3&&)*OxaY(Oo z5Y~cUk|B6_9^|q4q>C7muNepAIVteJO(ZO%!!!`?fA5xmQ+r@ON~<9JKSZ5XKvZwo z?FEsL1_|j#TDperPNhK!k(Tc6?vn2AZYk+Tx@)MRdx*37f8ROhf(x$q?EOCPde-_a z1oP(P-$m`Y{fWgz+>9j03>{saVrW+L(l*K2>D_QZwK}J zpZN+MZse>~Ogn@CrgFZz@a6&G4}Lgbb2R<G!j&}b-#8c!=|67u^v(A@i=nm}< z+mY4a3sq-LBo>{wSPNT0g7dH&@WuL_EZnm!I^C=NFv>oWFw>x5 z^!*-~c1B#m8Kx6RIcdSDgg>r@e}njIPGaY+XbLVKo@y{d%Eb;VrJ_lIrZ28H_xLO& zdTl`;{>6e@XFWWFrQK=Ml@*67`Au5g#@cqKNr4wpKepDe|Le7Ciwu%m;?6+UT-kg< zi)x;v*}dVOd5fE{81HelU907{XKzlsdr(U!q=#C?Q)hRVt=9E?`k*@9UAd&=gj7hEV z-n*=Rc}jNvDJ7uRY|$ZY-}x*7-@-$9b;}i{%2<5nEY2R$ht*MdHVd#qq?+>Yl@@0K zKq5IHrD3#|J~=%2b&;ZAH~_K!q_6f+dgHWfj&7a+6R$)uLRKF;*jRI%=WVi* z%3d%}C^gcE%D;1&hKIj+<$viFhqW6fx2_bGIAHEYmi#oUB02Y{>12hpYc+Wjzq7gJ z?!H~&s_J@hCCz?s_jB;O*gW3v!L5&FyW8PnM)`7hB(znTcd3@YvuZluo#p`4?OBwG zjORFBn*X;I@I*YT+~o&MBpa9^k;2{IsQa1I){ig;IbdBPdfJwN06klVq`rQL29Kx{ zR!;&_#;dLe+$wB#>Hw*N@E+xz4Cwt_vLac~<`7+;A1vaHm(vii7i4CdWnX)|zbpVU z%XaIYvc)3-AEvsm>5L3wQ00;bi{3trvYVB?q7aNwBGf$F;bsC4=JKU-Xu<9k_2>*f&r3B9QzH!$$Pe3>{5jrR=>GL4t3AgiuCF zL!4UdBG~?Yl{3w3VKW(Cxb*+x0AQ0%fX%CF-46w19lJ>pgw(ieAofv<#-)i}J0_F= zj$%RFkdy_|xBHKUJ?f3)0uY5%+c0E$EpbT0>8Pu{zRnPoFY<8k)0R1nQ&WPOR~%KA z0>3xqvY3*#JWq|rX5S)D;LA8kr}|Bavgl4B*yNAB^WdSYx7>8p7pMEGuF+UAdak&J zqq)|d+IlLM@ivV+4-O7#F2Y76aC`e^1Q9s__I@*Kz`S;l^$% zS5>l{pOZmsfg}5XBg%NXU|r@qPgKQn)ZPG2P-fZ5#GIfnvZ1o`!Q34_ivNV?9`G^0 zGt9mGgs&yVEaY|Y-xC7CeiS3SNK2|=cv=Kx1~$khra8^jZy$j-Wzi=xlt}t}bGpD`ab7;)TiZhl9U=lzyb6TwsK<63bqDh4 zF-nT~cdWvGREZ?MKLg$o80Xq_J{~npvfpO*AI7DuOZ9$>37n|f_ zWI||w3SyQQzpn4m-E-hXz{j}bsC3IJep=_22r79WThSW%Vg9kh!&pv7=_YzxwwBd) z#F*CD(*Ge&2#3jDm#h1ya}%4R3K!_Ja)tXWyyfyj48#_uUE z5dux#zi$C87XyFIc7V9>gZXk}g+h9^@-9LnHjO;SO`eB{c9XT$i1c(OB;+A^bwP^y zl-nNPm;Os>N^T@LlPergtw(ah3@eUtMEd-iI8^fZdN>hVowZF)04I#dK{=N`K#Y9c zxog_W`wMCfaD8N^o;-;OgDTYrSa(6bX|+1kRdk&_ZeDB#kL(tvfrD(=PAVu9t5swY zb7d~p`iy#^N8BzNPA60-vBe05lN~(>z-8cd4qgDlJih{T1OR=&q%Q=MB-z8q2f8Q>mkJ>{H6zc zy3^C0@=vz?AyAqi@%Nk1gT)STIjk2%KJt9Hs)=bWAE+yb8r3CKsV&v=6z+5$H*zrvTwjuF|OJX-LfIQ>@- zlH`y6lnguN^;u5)U-MTo`XWuqx%CoNnJzn-_XT9h}uKLy>C|?S?!)L+-7K62< zsar;sT=CY+`<-|tcz|eBmQiKgCM@P&2wmETwptO3BLEVQb<#B8aZ>#SNqX_?lTC!3 zgFfG>wa!=wJ+ih19eK$t;nZ7BQ?1UovKc#;1Y`&&BEUcx%FK5Au4gf^77NYP7Cl+M zfZ*x}0A_@c3s8l@^fbCAh4a~$o{teV)AxAXLi<1k_e<`gXI1-!Y_F0`bW3J*{e{*- z6oU+J^W(*ceFX_epJev;KaisLTo&urgvSP6_a+vEe493M4UOXs?KrcoFE(<~WYUKw zU3&cP?{Mg;79L6yY3Uk~MAEpv_^Eeo)4CS2YY!&Sk>9Ai#ZT27rQj>b$HZaiFB|tEDC6^sWb4L-XZ(3 z2;I|rL^q=ULkD)%a|J!nODv%)t$lo()`P-tz!X?`JNuI;iJM+C)U&Ztyv`>pZrd{L 
zyvyx;B#G_kERh^I#VD%Az1e+=w_0bm8zX zOi`q#)BWnpyyZTDAj^eUIjNgg!v;_k(K`^`nx{B<8olj*Y1-6qCHqgNOOcKZGFX$~ zmRNs_^-?TFt-9p2!y7yQaNT|&=e8nn(uum$#pUySUOU2!+|}zzyw}Bd<(PENhwMy0 zt$9vzTki$6S`$}E6uifpJ13h2%y!td7uYoL>5w)4H$j| zWY#Ef!8h07dFC&Bh$NQ)sRDaWDbz6c?NfDww)VaHJ6~7-+;X74rS!FL#!N5n@LWxi zx1SMpxxpg&(AA6>Wi|{XQZWW!sY$K#qD9HVVTA?7swkq+rd4bn2ln0u1K3f?I0Vl; zO??gMAjfYXWuRf)-4w~c(CW{nb5M<&(ry2K%oV3WwmAYpOl|~SDtjLi8kP3_2{_G- zot_E7=CV|F>H=k@VB$uxgfnT4Fl? zAovjC*dF*Hj&tuO}T#|0$SigOs-cR1TNoW9j9ys<2GkYv%`xw8OL@QpR2~GBYK~Ds!O6iX-Jl1k_RUg<{=O zD8&QwBlYqRu~6$muq|d#`iWr`_DqdGbdwshgPG6nV(RpGWGCN3F_D^0?st|$u}6_g z=pknnEi?iBAkiuHCD?(A#PR*TI07kCfCl`8rlUW1ujep?l9mHP)j{Kwa0sqQZ0)j& zqx~Bd$Hp0MA-%4wp3A34KU7=w9BV$15Nkpj*uzAdq+IYNKmQCw$E(u$@uA4W@s?E8 z@&%6feCS79J(K+|M_m)|5Ze8 z-p>smWls&F{9IPu&PY2atEm}}&@RwFEGG*!$_S_BpasA>(W(07M~0A@UufOOi*oU@Mg%?Gvo)o7pWp4JZN zkgc?Fd|TqUmu7(e+GBl?>A{eJIe5cXN{mjEuQYqq&aGrM+(@?FAzCtf7@>`Ht@6w+ zajxOn<>cDmZfTq3;Bw6oA~H0#*(luhcR1ClJKE9Uc2VQjMSx9)kA3GI%R=fXA<5cp zJwv+F-3+LseGqOFd#9?~iz?a~6@(>ts7Pyr625?HgSe1USV93rkz<=kG4X@i6Wh&rcX}*H)a$1@<&UE3c1wx_}F4|IFe|5`iJCxuFWzPLs33kN$@yZ;Rz=-96<0Ixm+jwP*n zguk+~`Rr8=E`4eg2EQH-iqbSdOC!Y2E8jDB&$N@LPk|JD(@o;qATRL99a0y@j~$tr zI0fr3S|G>TaV*JAc_-yYR@?WkN^k;gC7ott;~R~T4l9Ey{o!ofB!WdJcUGtn0Et&{npk{NW<671br|I^Z<+YYVuoo9U}CG|3T+3|QQLQz~} zBE(q><cV2XYu!fRfZQBLh?lb>P#_~2fluzd>X`gNi|G#wt# zD4T`qV`^Oum<_R2J1pYT>CvmKrPa%icYXVMhp%_Buiw_zgf%9SxOCGV%o3tqqK$*@ zm^*h&y_;6m8WjE9;8AwmWCJKI$FMD4lSEH@C!SenL$=jei>Em({s?rmvheC2BfoF; zGhwJDkk;7(8pkiY1=sv#6_d*hj?H-!n)t4(>J8bSJIP)@E%$I{MeF*S zefI~*0;2JJ3Z>KG;caqs2VS%=38fD=W&1+$cL@@3-})MGU=NzXO^SW8ow%|9-0ThK z_z#D#xur_fReU$LH6$0cXepJg%vqk&T#&v^?PC*F{&wUzc*uq$8OsE#;N~1*FsMLh zXZy%I#<9yL=~}2@%Qsr7c6{7SQ&&b>K1OW-2#9o5t5-7d%FkItrMTdZOs?YdB!(fisi9TPsqSUm)To=O$!6LJ={-N&$=x*@AYD7!wF)I_rH2v-!=ZrC_Y%@sAH_?& zCA9803R!jQZ56b1FelNjZyrP<| zzvAK!lV{_(n?xp8`%{}`0IJROq3I0me-%SOkCOCp)j&060ZjyfOCQp_5hL` z zt_y`hgb;Q4Y16N|9TNIV3uZTBJUMUW?~nqX^tT9MLS{deVXiF)K!YsR3*|o|_vXw= zOtB6SQ68GiHcs#zF}lClGg*Dmi~;SVaFP;)NL8m-njjANGcu3$C5y|h8;PcJ+uJXl5cubA z+xZB7=e5VVgjp=aukXa>crSSEEfM=hyO-VaAhU=KYF$&9igRjA#ByuZP~^AkcDU7X zSP9A?S+?bmu|h{uOm&vc-d0%`8$VcG{PFLS(#Lf^ib=t(4q?b99==q{ zd!>3~{IZ&8FkK>?V27Y@ecH^{G_4gDuo#@o5i0;d2?tz1h8VFx+!%-*A9UgH*C*F7 z5XbS0In1W}mWcWk`aIVydtWXAMGQzx`MlXg`DpV3XeYcetxB(!qYykempRVT$XAOp z*ALNTk>=^)M20lxi1UvD9HygTClo>>?xy2Oxlo)~BT{Rs-J^sgeH$XCGVf=NPVWb2 zt*fE+S~IUtvpTCH@rah!kW4k2x@?ldBfBK|f$ zW6Vh!>4^|@U=DCYiin&fu2UrBmOACk8Sp@-=H{(sUb-wcSXVM&KJ`0?^bhg2=tEE}$JZ8rNC@0R26Q z1Hhxk-ZKDzW32~L@QtyQSI5&dqT89bnVMGOEZJ7sKAuO5#I<(fA<`Y-&=8O_)LXAE zr96E60_%@{>yNULKz`8StPSp1YN)$PeT>ykq?QfzKN?A))G5W;%M6{OkZoSPo3~r6 zF>JA%U#A9V&_d~_4&NQ3OG=?3yKI-70qJJ0+RPx;C<{aMOf#|}V`l*vc8JfAE#v;U z!0k#kLfLUAp12N($7;2eQ>=k775&ia^>PSEbRj_;Nv}$GLhf;}C7aFK+WTmf)zPeU zW?(;0Re|in!fQt|U!mWj&EZfa;2v{8HcsLQY|Q#zJ;HQ>XB36xCdbQt{OL#JbeE%| zRADsUqCnFMhAn&Cyr$Wmr#r?5uYdZ0{Uf005j48k_J-qPqs8oCN@GstX2E(&N_5_} z`(y5Fld@}d3AjY-P-e8s|2u8Q`W#(G@4WtRmYLYY6#HOmn~eJOk$#(&injf-1Dt`X zmF1iigu&fUk-2Rc`a`GVB~aSR`|&(kz2w4w#o30P`W`oSso^%#QO0+9!2S<6Rnl2G zd8{X5CNUy2f!lnDZ%n0&H6TeVQNGkfZT11;yUjAbRQ7DYk58v|G>JeyQ0Ymx#?D6i z{4r8I@2tG-<}Wl2Nugatbgz(jYW%Z!cU}Dv6#Rj$zAK$0Y_P-w>cAf4<37Q9(of0g zImJ-bt_q+{jMjebBTznjCa~%P8w+4r1oX_2`LUKCiOp2q{1++*!R^a^!}p2(xJ?%^ z!WvepRabaWNq+J@e=Ee@6%&W1qd0kig(I?YdwtfiUWV{(s@r1K>s6a~t9D7X?&*2@ z`n=cV0MBA4x}9q<>Q7BM7>(xpKw&*#AnCp0~qz(-2MW*eBpR@PQCw@|s4=YBa zQ#Ge@n6ucEN#>NPbydst;IfP!mk>J~Cii3@v^`x`LGH~LsASRBwaLXg_F}Z8oD-V= z@=LDm(_H9c9s1kVSLD6d%ab$vu;cd5E57H(bVARA{y!^1_Ad&bI!tb+b;B zq|T$jDU!c1ENcTm0iw^ku`3(q>Tx?AF9;(klq5ykue%*FmAMa}OpjO-9bTUh&kAfu 
zY-C%n8tzF2w>289RwDp=I74N?%d-bcR)2T-r4w3c_P;}80>f8TR}B(viqeB&%(uVu|o&91#tPFl6w_7VVX8t0R(QkE7c z?EJOkOS<`dnKh>t-|jFq95_RViRV5)tUJx7zTO^><&dELu4l6w8n2C@@cS$S*|Uw` z`J@nX0NbXYcqbByMIBdwbM(>v)?%qXzd%7>GSV_`&#`9u0xIfQ6!caEklbJ$uStI{ z(7Gojwwq3Hd5&uGD5SDcE1&+p@p1vOAIItY&R*q5kDWUcav}p_NIkx8xlxNjwROj+ z1RT|Z@5~;YG@F<39V>JC{bv;UnUqg!eBJ)a%&mF253b(Cl*Ta=qWu87_ZB6#S>Dev3m2w>{bTo)JyggF4}oIa0GN@$}E3;-Q>}|7uUR? zRx^2hltB%ObfhSnGrao`9`R&3>JN_lP);6X0_AQ!?&HAWyk|tWLWS-sMN7XW}FZ< z0X;WpZ&=2XPw?Nu(tr;hGtvp6hj+F8YUM^DM8MJ@T!hnq?=AYu(R{skMK5{g+;;U4Y26bEd#iC|s+Q0NW-F2;+%V3-VH*7;M$ z#OfcJw+ZUsatx9D!!_E{tXpLkq@;Ozc7eHa%I{WySX#@4o00H*I54?p1UUbcPenyH-nGm?ogYFU3@Hr3*Wgf-)ztj+MbRGa;^w z(V@-)!B~1Io%ey{PaD!#w~)H@hR6`_%I6Y>FZh~-)Z&yA7lW~s6(=OX*-`CGL>(@n z3(=@Uh5?D{+VXX8555TAN3!)^&gQ#SV8fi%d)s`Gj(f^s{VVaAZxp_)uG zYhAy)HUi^~e5H1S>|M)4w5#uWY98Xo?ipZ_h*`F78xvK(5a=hQAZO*WUitam>&z{L zmeEVAgh3Et@I*=Ur`SuCL6@Aq2i*r;r)NKfhn1=r`|%PcaHd*v9Vf_+_4=Ywl3x82 zd9vPf=g$Pqcny75>iW39-X|J$KXO4FR8|&89B~)HI511X#s=@vJwmW9k4*_#pU+qO z#VAi*K~PM|u-kUvwnzmV;FAehER)taV-E`I$MJn_z5?ssIM)pM1COq*Z{%mdf^*@n zTED|>^I-sLQ=#1e_>wbgdw%S?YgA8O6uj;L*}~t%VgW=$a`WbxMXE=~*P~5P)Tai` zh=`g&YbVi!Ko3soI-=}ABm1=f?JuCQL;@9Y2bj2Gx?U7dOav#w%ZzR$u;+)m!kY$j zJkjFl=AS6q8Uq|!OZF71mU*1p9zQ#mFy24fl+0E~)VI{+%<^u&S!}YCFf?nj)LM|X-witTzsmEwO7XvB>7rj6XqbjlcH>Kb}! zeZ!CjhTe;i#kocN9P>wlq2~|fS|Df*$OGCPRWuO{`9F%n8~Rj{U>gHuElH}D znD}4*n%(sbyK@R zF$uUZBa43lmkM3Db!B?N5zU>10haXnH3T4vUc=SxB^r+XH|fD=e`EqFUoJttUFV+n zKZ`JqpNZSM3m0S0J$0y@jo15L*QslzR9D*9=&>lGwrJLcb#^9_=SW>}HL2}9*jVqJ z_Yu-$rCRPHR-!MUA_J+bX9`1U?X0P{&{@2QO{kZfSR^Zuqa9woJw}o;L{PhLdU@_I zP#Y*sT05DrrZOQX5S4MajM9KmcgrpSN>ErN!5tw&3!ls0l}<-EXPE$xz*YaO8LLXs_Ka*S zDT>-*7(RPg-PO+FS+2gSt|psAPd-56ZmF3i@%v-q?1O6w@yNdC<9(`>aL0AHSe`9^GC73C^@w6uaQq-RsL?F4RH_Y73j2s*l+*$YFyyKj+UH{i5Pkg zTxghiSPQ@|`U8F+6Z9y-L|C6UxRw~pXM9Onz+R6gNv|B|eo$nvH++pcS(gIv>3^R*&i}_8}ur5d8aT^e>g^gxyz^R7+74sd?$Nqce50? zJL+_*1Koj#2l{S-svk@J$=^VEHG$W--CLx&jQIFPf3lTED;+Zc)$4j5)}Gwf6-37_ ziZ2_BM$D&4Xd9;RfyM1v>OuFhK=)aw%o;LIatcGK}>Ic7ng^y9yg{0Z%y3hOpVY&Z8^J;H*n4L@u1;2nC- z|61XGVEjR(Pl}>tH@mm^&sOUDX4^W11%CvMh({S9xu-L3?>EQ1X4fJ{-xo~@s(KUEVT9I&B=osU#R7ztiPe;>TDZF<07#Vdg7Ry18CnS8*dP9=R z?i{r;cZyar-I305qB}L2ayv_)1sz4Bds*mGz0Q8JTrXqkppd{zj_cc!7(456`f)*e z9SFXA#UDMwWU6AWtrM*AjHx3Zx^NYZBxYVs>Esa7h`tj1D{LM;y?yQEu|3U)`4 z1_tx_X$KYBtm7yiq}-2Z>XckE5sQ#UQF+KoBM?{*4T*DJqV3`K7(+<5rQJK+Z@Gez z#QFUa+;?K72o|G1QH7*sb?Z>6Ff?-NQChjpC1&?I@sA4?UA{jJ@yE%836Pc;UG#AW zm=}=&`!_}Tq>ac=LjM~jm&S%c2_kpCR7e$Yclr&6Vgo*Fm{*@QO2hF;3Nsv;66;-` zk&YX@?13ytuo6X(f{>f!!B7$xc@aV--7;aACOdDneB{nhp#jen07IPuXm{d>v=Yfpv|p7^8x;NY8x zZX;Q+czPyf8HOY|dhKiP#42Ucr78e}Lmrah;b@m+t)>~J-GVP{RQdDFt6PI2PHx*7 z=U9D1OQf5>0}ua+V?SZ516dWrYR*-A8|VW0Qa4%daz_CI1G1U9mV?PJu;gA_b(KG1 z&bTW=t}ewR)(%5JXb#Y6{kq^*FoUCmsb$$*M-&+3pV7KI6BItP!>2=o4tmBOwMejyEq!j!!`k|6~-t~o! zp2nzwy>s!?uz}|_=a`b8+gqY?+)-rX;aP9v?Lw)Yq~-Cib_XoKy{NA@TGWD21t?I) zOTWw{&XNu?kNruYJL-kXB{5PwRh2T$qgWQtLA^(&DsIt9FBx0`V8*i68h8bqO-A;| zeNZ72H{jxq&6ot{mN{1e_?`X5>S16S{u1I2kR2P5%F%@$m3!Mx@BbPH;+4rO>NwC6i0P7&OLoY22@$ zbpwhA(qUV%;W`!p;%1Mepydp@$9X@V!az{v#cQw-!nndG#S&RyFLMy&LLucHUKujk zw;x+4HmUWOz-H=}ruMyCDX^4S2Kmld*cl8O@nrZsJ^%|Vt^ET)jN+1a3SnMuG?A%& zjvkxdk9f}pZerOzlVr*tONPqPMYc~?o4rlFow1QD3^$3f@57YR%)t7KL8sSRe=rgR zus`sBixC+!NAkYw!(UGgjq zJ;yH#n*%ox@~!V2d*}KjS^x7IVkjYEyo)>~n%el!-b)S#vYlvxi~eH`hcC}_iUoWZ z*1B!)jYB1{{rK&4w~DA-9KxxYD>Q*1FsZ*NV|{hAgCB6$`H7!BSD`a6if3022)=t( z=t>wEpEKPK=_t>HM-5|$=}-h^@K^%!lQsMqPmp7=P89(L!h0}f(NQh{O5Hn%R2~AZ z9q~Y{hRfqrI{p0;Uo%!50yXS)6!f|M!x}F}&`*SNY*@V7kNOzo37d3S)B48H(Syd= zWS9KgHF>Pd^cgSLB=UQT_Ab|`GfxAScrjn&(C>~NZ$v>5zIFg;W-5MPXofZf-zFD!Ke==yMQqoq7? 
zAjQFm}-RMTPMzW1=(gN+v@DzxcCcujc!spPe-xHmfd6d3*m&5r*W#>+N>C!MOj(UsnQ*XP`$sk~sIX*T;CSxs8K#!F~Y#-y=)#OT%ACk7?5)cFp(SaHgX zJ0&qs65+wqzrepK1Mmk@#6#^aLQ?JT63qc(={Q zV9PM3k_kP7bfO;6iB)V}l}>f|YU&Rr$Y|nN9L)Ds?iH1?1XuUansGe|_~-{+cfdti zUbP_MzF?jZugGF4BAr$Tx@0QsT%8~FU5yIim?-sS`$kqrk> zlMed;27}d=7NhGvV@yB4;BGAcuZ&&z0oZ(aqLlv9w3@b}PH*rAb)UPM7gv>86&fqJwA*+O*Sa5tmWba2s((qv$}Q8rxJ>nz;!*RQBKIKYNIyT_3Pld0 z(Rj@8JKy(AZ>*DcIoHIryiXfVvk!x;SAP$7;5jkUF=(+XOJHiFr3wRJz_58t02-Sb-;ZIkHlgTEv|6$;i zc`#GBtbQNtWO`i4v;ZcoiYlMyxyY%|2%ohg;h0;8bb~y5End$R60xbgDk1Tx=Jrag z1a*Y+0Q#PkLi*p52NXEF`2ljfk@*PuZn1w=^Q1lR`COK#iJkZ4DbM~v65KO^{^f>! z53zC4lhz~VkGyuqJf(=x6q_6yX?3E(pO#{9V(Zb|T+*J>04h%&-OsiY`yrl(93RN0 z1wtI5MojmA^EsP*qYx*|0BU)ytH0(!H9D4vv9#$DRZvz`eaf_>b=M47lJI>zGkg(1 zNNK$I9E_=;so7#z5c@t)#+wQ$dj1@O5CdsY74$1c?7}3No$uT4U936w76ro zE2Tk|Fvrw5litiTGVSSP+O1ehlw4;wAh9yWzX#N?Syl#`;)uEA;*0P&!2gR`URlA? zhtVk4#B7ay4z3q+y)fRqm*T2GagCy48HhHgLLgC-(LZ<)y0c9MMy2Ux8nZDOP7l+QAeBimT|o zX@Gfa7E1Lo)@IImb!YkXIo5sxs^6Nm*SixWa=|bswIwj(9B=a}6J8e|A6*Hcjh%iz z)nOs=O~42_&bx>#1zOYY)Y~bxzXz};UwRPE@=5_5OBLhFq=aGsDk)GCC99iF}iA!SNFj35x3#v3e5gv-oXvi{tWc%%0 zl(^It5Jbnc$z@`hW>#zc>@B&2d}=IFwOaMVw&gA@s+L!i!REwGx%30Ccxj}dD*VI& zi=GsbHOZsX{+-vr-W>(M$eta^PF$i4H`yMqC+Wa1VIOORa(r*mV*;Qm(W;G?AmW0x zM!qMzD_%{A!E3*&^Q07hB89YQ$wFHuP9d{l&aL|9IwibE4TY~q3vVs z!7}I`fB4pIhwT&0(kB2lHt{N%omO82+_8#_1;db93D#Drlo;tGvy0zEvs zz}aEHCae|epE|a1VRRdB4`!suymz^~mlFuD*!~#$$nHzgoZU};e;3fN4pf8%_+t0r z3Hp7K&vHNEr4;}CDO}3897e=ngu;)(NlHJv6WEpNQl5#(?|=hfc)ouo=iaarRK>qz zFOERVB6L+$tlCyVGNc|xqO#j!pgoQ3-+B!_TanYAVk?>DNjE>;ERTudtbIhvAy87) z5xdRJ?>5K2g(?C!@@8oNOx$h4Wza!F3#bm!xUxR~vr#fG*Zl#{se5KL7PagkK+8Mk z_qmASf(ga8UH9L2+2$OHO7P`{0!9Xs14U?uzJSai~rRO zRhL%;rC*PdvO$J<*v|G6n%MR!M% zMiJ^hr1>kC~Y zD}bM33@HGaNq!a??uki%p9f);_&WF;&Hf9<^&NSPZD@qAEj?Ustd86eR;ty2laT6f z)8&W}?*vreW@-0#rvER#*?}lbcKf=Y{RF!NBlm@;{{;m9t^@UxEsJt^zQ^L6<7dM0 zrnrx=#Od#1aGy&AG>ZO9LB%0zgo9=t!rwkIoI5n zf(5BR;?K!C)VLqO8Ll)qq6&PoinELFI%f3tl0G^p%4+0O9U1c>eO=}+xC2{?369+m zkAdA_I}_^Y!*`tf>s{VCKapRX4*s=hZ@|l<)jFbLVm$&0W?yX*9sUr@xAp#yYq&`- z|CkWt+zu{D+z>8M(flCY4-G{$W%doC=w5)8)EElTSH9uv{px<#4G>cNlJ9G%S;QpPf@{HIYh#8eUap)4Ikw{?R@N2zBuBg!#XB0~g@pmNE{B0u|$-t4P} z4sHb}@h1O7I%tXr!YkJ36dv;Qbt?)NWNLp}xJmP;huoqn5<55Vns&kW+o48lI>yZj z?%AdDtZ?Ki>4JGH*zUI%FG21^0#Ti)#vQ1|10C5wHN$hpp<~RuR@7!MaqW=~cT*LO zyJW>V13r@8yMfO_*tgR&owL-g*_q%Qw}VI`Ubg1L(z2VStV|C3BceftSIUn+5SL<3 zpZ{-Znhe>ry08aE69izWcgvnLaZ&&vRJ{XujG7oko(|HBu{y5No#=1*8pdmVEv{